diff --git a/go.mod b/go.mod
index b95649c5c1..3b09a45fe2 100644
--- a/go.mod
+++ b/go.mod
@@ -5,82 +5,81 @@ go 1.21.0
toolchain go1.21.4
require (
- github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.98.0
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.98.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.102.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
- go.opentelemetry.io/collector/component v0.98.0
- go.opentelemetry.io/collector/confmap v0.98.0
- go.opentelemetry.io/collector/confmap/converter/expandconverter v0.98.0
- go.opentelemetry.io/collector/confmap/provider/envprovider v0.98.0
- go.opentelemetry.io/collector/confmap/provider/fileprovider v0.98.0
- go.opentelemetry.io/collector/confmap/provider/httpprovider v0.98.0
- go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0
- go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0
- go.opentelemetry.io/collector/exporter v0.98.0
- go.opentelemetry.io/collector/exporter/loggingexporter v0.98.0
- go.opentelemetry.io/collector/exporter/otlpexporter v0.98.0
- go.opentelemetry.io/collector/exporter/otlphttpexporter v0.98.0
- go.opentelemetry.io/collector/extension v0.98.0
- go.opentelemetry.io/collector/extension/ballastextension v0.98.0
- go.opentelemetry.io/collector/extension/zpagesextension v0.98.0
- go.opentelemetry.io/collector/featuregate v1.5.0
- go.opentelemetry.io/collector/otelcol v0.98.0
- go.opentelemetry.io/collector/processor v0.98.0
- go.opentelemetry.io/collector/processor/batchprocessor v0.98.0
- go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.98.0
- go.opentelemetry.io/collector/receiver v0.98.0
- go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0
+ go.opentelemetry.io/collector/component v0.102.1
+ go.opentelemetry.io/collector/confmap v0.102.1
+ go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1
+ go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1
+ go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1
+ go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1
+ go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1
+ go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1
+ go.opentelemetry.io/collector/exporter v0.102.1
+ go.opentelemetry.io/collector/exporter/loggingexporter v0.102.1
+ go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1
+ go.opentelemetry.io/collector/exporter/otlphttpexporter v0.102.1
+ go.opentelemetry.io/collector/extension v0.102.1
+ go.opentelemetry.io/collector/extension/ballastextension v0.102.1
+ go.opentelemetry.io/collector/extension/zpagesextension v0.102.1
+ go.opentelemetry.io/collector/featuregate v1.9.0
+ go.opentelemetry.io/collector/otelcol v0.102.1
+ go.opentelemetry.io/collector/processor v0.102.1
+ go.opentelemetry.io/collector/processor/batchprocessor v0.102.1
+ go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.102.1
+ go.opentelemetry.io/collector/receiver v0.102.1
+ go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/sys v0.19.0
+ golang.org/x/sys v0.20.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (
- cloud.google.com/go/compute v1.24.0 // indirect
- cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 // indirect
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
@@ -88,65 +87,112 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
github.com/Code-Hex/go-generics-cache v1.3.1 // indirect
- github.com/DataDog/agent-payload/v5 v5.0.111 // indirect
- github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1-0.20240321095122-a3c5dbb936ae // indirect
- github.com/DataDog/datadog-agent/pkg/proto v0.52.1-0.20240321095122-a3c5dbb936ae // indirect
- github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1-0.20240321095122-a3c5dbb936ae // indirect
- github.com/DataDog/datadog-agent/pkg/trace v0.52.1-0.20240321095122-a3c5dbb936ae // indirect
- github.com/DataDog/datadog-agent/pkg/util/cgroups v0.52.1-0.20240321095122-a3c5dbb936ae // indirect
- github.com/DataDog/datadog-agent/pkg/util/log v0.52.1-0.20240321095122-a3c5dbb936ae // indirect
- github.com/DataDog/datadog-agent/pkg/util/pointer v0.52.1-0.20240321095122-a3c5dbb936ae // indirect
- github.com/DataDog/datadog-agent/pkg/util/scrubber v0.52.1-0.20240321095122-a3c5dbb936ae // indirect
- github.com/DataDog/datadog-api-client-go/v2 v2.24.0 // indirect
+ github.com/DataDog/agent-payload/v5 v5.0.119 // indirect
+ github.com/DataDog/datadog-agent/comp/core/config v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/core/log v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/core/secrets v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/core/telemetry v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/def v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/logs/agent/config v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.0.0-20240525065430-d0b647bcb646 // indirect
+ github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/env v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/model v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/setup v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/utils v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/auditor v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/client v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/message v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/metrics v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/processor v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/sds v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/sender v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/sources v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/obfuscate v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/proto v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/status/health v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/telemetry v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/trace v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/backoff v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/cgroups v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/executable v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/filesystem v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/fxutil v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/http v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/log v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/optional v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/pointer v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/scrubber v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/startstop v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/statstracker v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system/socket v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/winutil v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-agent/pkg/version v0.54.0-rc.5 // indirect
+ github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
+ github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240419161837-f1b2f553edfe // indirect
github.com/DataDog/go-sqllexer v0.0.9 // indirect
- github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
+ github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.13.4 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.13.4 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.13.4 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.13.4 // indirect
- github.com/DataDog/sketches-go v1.4.4 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.16.1 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.16.1 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.16.1 // indirect
+ github.com/DataDog/sketches-go v1.4.5 // indirect
+ github.com/DataDog/viper v1.13.3 // indirect
github.com/DataDog/zstd v1.5.2 // indirect
- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.22.0 // indirect
- github.com/IBM/sarama v1.43.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0 // indirect
+ github.com/IBM/sarama v1.43.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Showmax/go-fqdn v1.0.0 // indirect
github.com/alecthomas/participle/v2 v2.1.1 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/apache/thrift v0.20.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
- github.com/aws/aws-sdk-go v1.51.17 // indirect
- github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect
+ github.com/aws/aws-sdk-go v1.53.11 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.27.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.27.16 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.16 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 // indirect
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.10 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 // indirect
github.com/aws/smithy-go v1.20.2 // indirect
+ github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect
+ github.com/briandowns/spinner v1.23.0 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
github.com/cilium/ebpf v0.11.0 // indirect
- github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect
+ github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/ttrpc v1.2.2 // indirect
@@ -167,8 +213,8 @@ require (
github.com/envoyproxy/go-control-plane v0.12.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect
- github.com/expr-lang/expr v1.16.3 // indirect
- github.com/fatih/color v1.15.0 // indirect
+ github.com/expr-lang/expr v1.16.9 // indirect
+ github.com/fatih/color v1.16.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
@@ -179,7 +225,7 @@ require (
github.com/go-openapi/jsonpointer v0.20.2 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/swag v0.22.9 // indirect
- github.com/go-resty/resty/v2 v2.11.0 // indirect
+ github.com/go-resty/resty/v2 v2.12.0 // indirect
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
github.com/go-zookeeper/zk v1.0.3 // indirect
github.com/gobwas/glob v0.2.3 // indirect
@@ -205,8 +251,8 @@ require (
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
- github.com/hashicorp/consul/api v1.28.2 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+ github.com/hashicorp/consul/api v1.28.3 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -216,18 +262,19 @@ require (
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
- github.com/hashicorp/go-version v1.6.0 // indirect
+ github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
+ github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect
github.com/hetznercloud/hcloud-go/v2 v2.6.0 // indirect
github.com/iancoleman/strcase v0.3.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 // indirect
github.com/ionos-cloud/sdk-go/v6 v6.1.11 // indirect
- github.com/jaegertracing/jaeger v1.55.0 // indirect
+ github.com/jaegertracing/jaeger v1.57.0 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/gofork v1.7.6 // indirect
@@ -237,19 +284,22 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/karrick/godirwalk v1.17.0 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/knadh/koanf v1.5.0 // indirect
github.com/knadh/koanf/v2 v2.1.1 // indirect
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
- github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect
+ github.com/leodido/go-syslog/v4 v4.1.0 // indirect
+ github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect
github.com/lightstep/go-expohisto v1.0.0 // indirect
- github.com/linode/linodego v1.30.0 // indirect
+ github.com/linode/linodego v1.33.0 // indirect
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
+ github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.19 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.58 // indirect
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
@@ -260,41 +310,42 @@ require (
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
github.com/mostynb/go-grpc-compression v1.2.2 // indirect
github.com/mrunalp/fileutils v0.5.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.98.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.98.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.102.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/runc v1.1.12 // indirect
@@ -302,20 +353,21 @@ require (
github.com/opencontainers/selinux v1.10.0 // indirect
github.com/openshift/api v3.9.0+incompatible // indirect
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect
- github.com/openzipkin/zipkin-go v0.4.2 // indirect
+ github.com/openzipkin/zipkin-go v0.4.3 // indirect
github.com/outcaste-io/ristretto v0.2.1 // indirect
github.com/ovh/go-ovh v1.4.3 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
+ github.com/pelletier/go-toml v1.7.0 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
- github.com/prometheus/client_golang v1.19.0 // indirect
+ github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.52.3 // indirect
+ github.com/prometheus/common v0.53.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
- github.com/prometheus/procfs v0.13.0 // indirect
+ github.com/prometheus/procfs v0.15.0 // indirect
github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/relvacode/iso8601 v1.4.0 // indirect
@@ -323,11 +375,15 @@ require (
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 // indirect
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
- github.com/shirou/gopsutil/v3 v3.24.3 // indirect
+ github.com/shirou/gopsutil/v3 v3.24.4 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 // indirect
github.com/signalfx/sapm-proto v0.14.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/spf13/afero v1.11.0 // indirect
+ github.com/spf13/cast v1.6.0 // indirect
+ github.com/spf13/jwalterweatherman v1.0.0 // indirect
+ github.com/stormcat24/protodep v0.1.8 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/tidwall/gjson v1.12.1 // indirect
@@ -346,62 +402,64 @@ require (
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.etcd.io/bbolt v1.3.9 // indirect
+ go.etcd.io/bbolt v1.3.10 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/collector v0.98.0 // indirect
- go.opentelemetry.io/collector/config/configauth v0.98.0 // indirect
- go.opentelemetry.io/collector/config/configcompression v1.5.0 // indirect
- go.opentelemetry.io/collector/config/configgrpc v0.98.0 // indirect
- go.opentelemetry.io/collector/config/confighttp v0.98.0 // indirect
- go.opentelemetry.io/collector/config/confignet v0.98.0 // indirect
- go.opentelemetry.io/collector/config/configopaque v1.5.0 // indirect
- go.opentelemetry.io/collector/config/configretry v0.98.0 // indirect
- go.opentelemetry.io/collector/config/configtelemetry v0.98.0 // indirect
- go.opentelemetry.io/collector/config/configtls v0.98.0 // indirect
- go.opentelemetry.io/collector/config/internal v0.98.0 // indirect
- go.opentelemetry.io/collector/connector v0.98.0 // indirect
- go.opentelemetry.io/collector/consumer v0.98.0 // indirect
- go.opentelemetry.io/collector/extension/auth v0.98.0 // indirect
- go.opentelemetry.io/collector/pdata v1.5.0 // indirect
- go.opentelemetry.io/collector/semconv v0.98.0 // indirect
- go.opentelemetry.io/collector/service v0.98.0 // indirect
- go.opentelemetry.io/contrib/config v0.4.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
- go.opentelemetry.io/contrib/propagators/b3 v1.25.0 // indirect
- go.opentelemetry.io/contrib/zpages v0.50.0 // indirect
- go.opentelemetry.io/otel v1.25.0 // indirect
- go.opentelemetry.io/otel/bridge/opencensus v1.25.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.25.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 // indirect
- go.opentelemetry.io/otel/exporters/prometheus v0.47.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.25.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.25.0 // indirect
- go.opentelemetry.io/otel/metric v1.25.0 // indirect
- go.opentelemetry.io/otel/sdk v1.25.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.25.0 // indirect
- go.opentelemetry.io/otel/trace v1.25.0 // indirect
- go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+ go.opentelemetry.io/collector v0.102.1 // indirect
+ go.opentelemetry.io/collector/config/configauth v0.102.1 // indirect
+ go.opentelemetry.io/collector/config/configcompression v1.9.0 // indirect
+ go.opentelemetry.io/collector/config/configgrpc v0.102.1 // indirect
+ go.opentelemetry.io/collector/config/confighttp v0.102.1 // indirect
+ go.opentelemetry.io/collector/config/confignet v0.102.1 // indirect
+ go.opentelemetry.io/collector/config/configopaque v1.9.0 // indirect
+ go.opentelemetry.io/collector/config/configretry v0.102.1 // indirect
+ go.opentelemetry.io/collector/config/configtelemetry v0.102.1 // indirect
+ go.opentelemetry.io/collector/config/configtls v0.102.1 // indirect
+ go.opentelemetry.io/collector/config/internal v0.102.1 // indirect
+ go.opentelemetry.io/collector/connector v0.102.1 // indirect
+ go.opentelemetry.io/collector/consumer v0.102.1 // indirect
+ go.opentelemetry.io/collector/extension/auth v0.102.1 // indirect
+ go.opentelemetry.io/collector/pdata v1.9.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.102.1 // indirect
+ go.opentelemetry.io/collector/service v0.102.1 // indirect
+ go.opentelemetry.io/contrib/config v0.7.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
+ go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect
+ go.opentelemetry.io/contrib/zpages v0.52.0 // indirect
+ go.opentelemetry.io/otel v1.27.0 // indirect
+ go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect
+ go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect
+ go.opentelemetry.io/otel/metric v1.27.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.27.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect
+ go.opentelemetry.io/otel/trace v1.27.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.2.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
- golang.org/x/crypto v0.22.0 // indirect
- golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
- golang.org/x/mod v0.16.0 // indirect
- golang.org/x/net v0.24.0 // indirect
- golang.org/x/oauth2 v0.18.0 // indirect
- golang.org/x/term v0.19.0 // indirect
- golang.org/x/text v0.14.0 // indirect
+ go.uber.org/dig v1.17.0 // indirect
+ go.uber.org/fx v1.18.2 // indirect
+ golang.org/x/crypto v0.23.0 // indirect
+ golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
+ golang.org/x/mod v0.17.0 // indirect
+ golang.org/x/net v0.25.0 // indirect
+ golang.org/x/oauth2 v0.20.0 // indirect
+ golang.org/x/sync v0.7.0 // indirect
+ golang.org/x/term v0.20.0 // indirect
+ golang.org/x/text v0.15.0 // indirect
golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.19.0 // indirect
+ golang.org/x/tools v0.21.0 // indirect
gonum.org/v1/gonum v0.15.0 // indirect
- google.golang.org/api v0.168.0 // indirect
- google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
- google.golang.org/grpc v1.63.2 // indirect
- google.golang.org/protobuf v1.33.0 // indirect
+ google.golang.org/api v0.169.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect
+ google.golang.org/grpc v1.64.0 // indirect
+ google.golang.org/protobuf v1.34.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
@@ -412,7 +470,7 @@ require (
k8s.io/client-go v0.29.3 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
- k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect
+ k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
sigs.k8s.io/controller-runtime v0.17.3 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
diff --git a/go.sum b/go.sum
index 6516fc48d0..0d795cf5a3 100644
--- a/go.sum
+++ b/go.sum
@@ -19,10 +19,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
-cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
-cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 h1:aRVqY1p2IJaBGStWMsQMpkAa83cPkCDLl80eOj0Rbz4=
-cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68/go.mod h1:1a3eRNYX12fs5UABBIXS8HXVvQbX9hRB/RkEBPORpe8=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -61,64 +59,171 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g=
github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
-github.com/DataDog/agent-payload/v5 v5.0.111 h1:mM+4OBkXF9tjKV0VjwnNO5As9aKcNAEsagvKDSBaTyc=
-github.com/DataDog/agent-payload/v5 v5.0.111/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1-0.20240321095122-a3c5dbb936ae h1:aVo1Uh2WQ8TvgbjqlbDvfP5AcUtnqXUUrc9pVP8MvKc=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1-0.20240321095122-a3c5dbb936ae/go.mod h1:AVPQWekk3h9AOC7+plBlNB68Sy6UIGFoMMVUDeSoNoI=
-github.com/DataDog/datadog-agent/pkg/proto v0.52.1-0.20240321095122-a3c5dbb936ae h1:b6lU79trCyadhkxhb51jXiqmZaHs1Z0fwWlWKFVCqJ4=
-github.com/DataDog/datadog-agent/pkg/proto v0.52.1-0.20240321095122-a3c5dbb936ae/go.mod h1:s6zD4ZvPBta68SBCsCnnbn3VJzoQk5wNd0VJOpB84Ug=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1-0.20240321095122-a3c5dbb936ae h1:/vfuF60+5qeGM62IDG1F6Asfa1VGAohiQVivRo9TWoo=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1-0.20240321095122-a3c5dbb936ae/go.mod h1:JhAilx32dkIgoDkFXquCTfaWDsAOfe+vfBaxbiZoPI0=
-github.com/DataDog/datadog-agent/pkg/trace v0.52.1-0.20240321095122-a3c5dbb936ae h1:B1E9jVLpw+Hrcv9ggzCridX3YMeUmZBE3zHghdwfTSc=
-github.com/DataDog/datadog-agent/pkg/trace v0.52.1-0.20240321095122-a3c5dbb936ae/go.mod h1:1FTu8gSg8Pd8DVgRfxlcPN4F+d8K7A3lbMCT84FX44Y=
-github.com/DataDog/datadog-agent/pkg/util/cgroups v0.52.1-0.20240321095122-a3c5dbb936ae h1:GL9JPEUyKDH2brURt09llrqWBmExCuqbHeWucezZ6sY=
-github.com/DataDog/datadog-agent/pkg/util/cgroups v0.52.1-0.20240321095122-a3c5dbb936ae/go.mod h1:OnkC+HYj+NU9dmowWVtFpmnA4wVVibejbzID7TNm7jA=
-github.com/DataDog/datadog-agent/pkg/util/log v0.52.1-0.20240321095122-a3c5dbb936ae h1:sUEteYos3JjJJB1+A9inYJ3uzM18t9cygjSs0cik8x4=
-github.com/DataDog/datadog-agent/pkg/util/log v0.52.1-0.20240321095122-a3c5dbb936ae/go.mod h1:z4lpju3fhDa8JcbXxU0Bc6A6ci8xoQMuxDXbBdf4gHw=
-github.com/DataDog/datadog-agent/pkg/util/pointer v0.52.1-0.20240321095122-a3c5dbb936ae h1:qveAzof7ZZXvm93FL7h5mRM8B4q9w14g2fx6wcZt88k=
-github.com/DataDog/datadog-agent/pkg/util/pointer v0.52.1-0.20240321095122-a3c5dbb936ae/go.mod h1:HgJEYNmnFTKIuBhWxYe1coqmzoJXMxQTfK+4wIG5G1Q=
-github.com/DataDog/datadog-agent/pkg/util/scrubber v0.52.1-0.20240321095122-a3c5dbb936ae h1:YZz6I8ym9P4MLytAdAJlafF3tgItgAGZrDqe4otbVUk=
-github.com/DataDog/datadog-agent/pkg/util/scrubber v0.52.1-0.20240321095122-a3c5dbb936ae/go.mod h1:EVETfdJCkqy0YEvSpQd9LZdcYQ7vrUomCm+bQ6h3lc4=
-github.com/DataDog/datadog-api-client-go/v2 v2.24.0 h1:7G+eyezFM8gHq5dOHcrQcGVxrXnwPqX2yYHxsLiq3iM=
-github.com/DataDog/datadog-api-client-go/v2 v2.24.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc=
+github.com/DataDog/agent-payload/v5 v5.0.119 h1:PgeA41P3BE4z+b0j61B6yXnZDw7tB7bxl5EIHyGVG14=
+github.com/DataDog/agent-payload/v5 v5.0.119/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs=
+github.com/DataDog/datadog-agent/cmd/agent/common/path v0.54.0-rc.5 h1:nLwtiYCoD+yb4HfhcJ3hmD9pczYLveOJexH5OHVQWsk=
+github.com/DataDog/datadog-agent/cmd/agent/common/path v0.54.0-rc.5/go.mod h1:wJQkyIKR2epsYERgVRrO55fgda+oS5ZyBy2neJFXXGM=
+github.com/DataDog/datadog-agent/comp/core/config v0.54.0-rc.5 h1:DKefO1N2aGtsUWRxyziQh3cHS3LuqdFjd+QmHR/qLNk=
+github.com/DataDog/datadog-agent/comp/core/config v0.54.0-rc.5/go.mod h1:nDpszL8YD58OU/w2uFtN6oHv0cDDZrRNufYgBDtleVw=
+github.com/DataDog/datadog-agent/comp/core/flare/types v0.54.0-rc.5 h1:GTYjjIV8YupcNqcNgcAnrNkbofcb6w7JFVdtfv9On9Q=
+github.com/DataDog/datadog-agent/comp/core/flare/types v0.54.0-rc.5/go.mod h1:aChd0OV9bB534m+H4IPglCAs441s/1R+Tau5USeSM+Q=
+github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.54.0-rc.5 h1:yz7efcj2vC0N0bcRwVItXXsFUl4JFgEzsqe3lDhdu7s=
+github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.54.0-rc.5/go.mod h1:GhQb1d4Z4Z/E/WnUpmtSKADJSwziEIz0jwU0/MvNTic=
+github.com/DataDog/datadog-agent/comp/core/log v0.54.0-rc.5 h1:2tvd3K+Bat8hT2jJBpIAfH/8J9k7v+s+IQcoyVVQitU=
+github.com/DataDog/datadog-agent/comp/core/log v0.54.0-rc.5/go.mod h1:mtMxZiwg13b4bHgDf8xE6FHgTcadzI5Cc0lx2MSY1mE=
+github.com/DataDog/datadog-agent/comp/core/secrets v0.54.0-rc.5 h1:hhkI7EtQwvhrKXO+oNo7V2ZhlSJ2Pt3uj9rOUeVrlSs=
+github.com/DataDog/datadog-agent/comp/core/secrets v0.54.0-rc.5/go.mod h1:Bx0rH0jqLkDzQCBeDsgKGmcehYoQjuO6fE8aGjiQe2A=
+github.com/DataDog/datadog-agent/comp/core/telemetry v0.54.0-rc.5 h1:v8WyP8srS/5EfD7OKZCm1aRPT56AXaQ2QqQRO2+fwUM=
+github.com/DataDog/datadog-agent/comp/core/telemetry v0.54.0-rc.5/go.mod h1:ZpND0xZO2YVm7FNbPOrVoRMbPFhVRbZC40Vb5xsgyQc=
+github.com/DataDog/datadog-agent/comp/def v0.54.0-rc.5 h1:pVYCT2SztEIcF03BdbrDgPfT7C/Pc2earv9YB4/2lU0=
+github.com/DataDog/datadog-agent/comp/def v0.54.0-rc.5/go.mod h1:ts1c7j9tfa/BaoBH/gdQPt1LTeLeSh0b7IHMqm7ylDE=
+github.com/DataDog/datadog-agent/comp/logs/agent/config v0.54.0-rc.5 h1:V4zdArRqhlZWT0tWj8mJE/pDRcAs4IkBC7Y5N+Gjr7U=
+github.com/DataDog/datadog-agent/comp/logs/agent/config v0.54.0-rc.5/go.mod h1:88zmScug+KuG8EEuyaf3CAw5idi7XFMDTq66u6UPW4A=
+github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.54.0-rc.5 h1:FAZQ6viOK9CiU8x7aW2PJMlI4nq3bmNnOT642ASUxxc=
+github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.54.0-rc.5/go.mod h1:c+WZvnTktqP8inoph1/wvxvfLx9rV/cKoR/zvoEpu6E=
+github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl v0.54.0-rc.5 h1:dtJiztozoBiltjE0382hAMtDeEfRnEov186xGZ4TGB0=
+github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl v0.54.0-rc.5/go.mod h1:yMYGvbt3AAXiXoadUxEbxWl6C3/Ub2t8dn2UG7Y54SM=
+github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.54.0-rc.5 h1:5HcA9XYLdI0Z81o5p7SppdVCYjKoshv9GBrqZfeLvC4=
+github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.54.0-rc.5/go.mod h1:R2jDIkQpwYzy4AturN73oJB2X32jtivHW9TvdeN7BAQ=
+github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.0.0-20240525065430-d0b647bcb646 h1:KeIeWDAjzQxL6/ruBQmFlT/FRstEz11z/UT3LH+8sAA=
+github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.0.0-20240525065430-d0b647bcb646/go.mod h1:TIf/0Kb3DAYAtYbhhxmZ72etu+IKSqy66mcIGBuioik=
+github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.0.0-20240525065430-d0b647bcb646 h1:dyGnQr1QVLhwLdKzCTMsDsnYYUOkOXU4JWT2HuS1LJ4=
+github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.0.0-20240525065430-d0b647bcb646/go.mod h1:m7gt1CfJ/8FgnidFPk2lENaxO9m0mOnWWJQylgJkMzw=
+github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.54.0-rc.5 h1:GuGbndVAnhNa6JOtDsDEWXmZFJB0fp4iCnJ3OO1JSks=
+github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.54.0-rc.5/go.mod h1:6ydg+CWz9UlcBkV4pptOdQXTLNc/8YkTqta6VJFGg1M=
+github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.54.0-rc.5 h1:YEvetFgYjf3oZsliKn0ddrz1t4ZvOzzlvOxJ100szlI=
+github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.54.0-rc.5/go.mod h1:Mpe5lBwTlmLsu1WwIx8uRbj+Bi8+9gW8rWDfXPRwQVQ=
+github.com/DataDog/datadog-agent/pkg/config/env v0.54.0-rc.5 h1:D4e5j805YtqmdhSeCXax020Nht6arNUvuvX4AKDMfu8=
+github.com/DataDog/datadog-agent/pkg/config/env v0.54.0-rc.5/go.mod h1:9FPuluS8k4LEpfvlz7KQ6iHmqyTVfvuKxhwZkEUeoI4=
+github.com/DataDog/datadog-agent/pkg/config/logs v0.54.0-rc.5 h1:xJ6u0OftjMYixmaDlb5yba3T72kyo/rpI7iuOYiAgJE=
+github.com/DataDog/datadog-agent/pkg/config/logs v0.54.0-rc.5/go.mod h1:uo77w0taiJfXk3RV4N71t3WmoHhu6wJ782YmGeV6Ynk=
+github.com/DataDog/datadog-agent/pkg/config/model v0.54.0-rc.5 h1:6HhxZQzS2edPF9ZevjWtstu6y17lj4a4fsRiTJ3ObHI=
+github.com/DataDog/datadog-agent/pkg/config/model v0.54.0-rc.5/go.mod h1:X6U3mq0dErErZUIQx3GMmrO0jdp7aGN656iSDUW8sfw=
+github.com/DataDog/datadog-agent/pkg/config/setup v0.54.0-rc.5 h1:+kl+DEK320zyWZBoFzTofgwN0AzUKpJByf8KdJ7WpJQ=
+github.com/DataDog/datadog-agent/pkg/config/setup v0.54.0-rc.5/go.mod h1:2qnUs+85QVGqdK4DOpEaox6MKbfKFo3v2MXl9W8zP9Y=
+github.com/DataDog/datadog-agent/pkg/config/utils v0.54.0-rc.5 h1:FQmC8qbWoaH4GFzuSuGFtJCR3kleh6xBmQXISiE8/Ho=
+github.com/DataDog/datadog-agent/pkg/config/utils v0.54.0-rc.5/go.mod h1:pWPWng1HBqWccXoYJXfmWgpr+pujCpe4XYtGbiH4IOU=
+github.com/DataDog/datadog-agent/pkg/logs/auditor v0.54.0-rc.5 h1:dQIi1vw2MaBmrT0kQv0qE5csFecCzaoPGY8m8KTEiqA=
+github.com/DataDog/datadog-agent/pkg/logs/auditor v0.54.0-rc.5/go.mod h1:7w1fYTvDKwfk2J08P3vKEieeULSFASfZ0IgCjEoIF2E=
+github.com/DataDog/datadog-agent/pkg/logs/client v0.54.0-rc.5 h1:YMYTquLk0uzYxvyRR1ew1QBf5dMtz3b/uBuwQzVODs4=
+github.com/DataDog/datadog-agent/pkg/logs/client v0.54.0-rc.5/go.mod h1:YZbEwh9sPrDqxj02BHUHN6ljR+wohaN5QkkTdNZBLxY=
+github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.54.0-rc.5 h1:klt8iysuxBuycyDu6lE51oCcAzZLJpvA7HiAKyQv9GM=
+github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.54.0-rc.5/go.mod h1:93tXt/40ar1666Kk83CDOsb35xhAhQiPSqEH24zZ164=
+github.com/DataDog/datadog-agent/pkg/logs/message v0.54.0-rc.5 h1:tX7zHQ6bmLJTfcRTcGY+91VXCZPEY2/OxKlVg/P4Tec=
+github.com/DataDog/datadog-agent/pkg/logs/message v0.54.0-rc.5/go.mod h1:SGUmU3Fz/AtmdJvfos5EokpT4CN9YVi50DbYYZX0q4w=
+github.com/DataDog/datadog-agent/pkg/logs/metrics v0.54.0-rc.5 h1:tzzzy68wHmQLIrpbJbmo9ZYqm8pHTlWvjUYnQUMBsUU=
+github.com/DataDog/datadog-agent/pkg/logs/metrics v0.54.0-rc.5/go.mod h1:vcMlIQfRoHkafZFAHtHrkmeCSQzGpXtE2iyLkaObnUk=
+github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.54.0-rc.5 h1:yJ3W4/CQwlEifXz1FDCehvoVvGwaH4LXuJ6A/TbRMdc=
+github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.54.0-rc.5/go.mod h1:WsysKMxzTLQHn75WAUwHHw/SyhGQr+m8fYE3iMYl2gA=
+github.com/DataDog/datadog-agent/pkg/logs/processor v0.54.0-rc.5 h1:RqKUVfSHrw5D8nRMIhOaAKQThk7pHRbyDVo2CylYtAY=
+github.com/DataDog/datadog-agent/pkg/logs/processor v0.54.0-rc.5/go.mod h1:LwAwQKHHSsOC0sspi58vpN2h4tFEI1ZNtU69OUVFBxc=
+github.com/DataDog/datadog-agent/pkg/logs/sds v0.54.0-rc.5 h1:u8dgzzSluJx7WMk3J834CxCP7FCo+z0PU9d1vj7K1HU=
+github.com/DataDog/datadog-agent/pkg/logs/sds v0.54.0-rc.5/go.mod h1:+octueIX19RzWrarAkKNIAtFTfETavn6Ku4s217EcSY=
+github.com/DataDog/datadog-agent/pkg/logs/sender v0.54.0-rc.5 h1:n5UMZvF+5L3XekeS5bkR0adwcOsogIT9uEtq2vHMhtU=
+github.com/DataDog/datadog-agent/pkg/logs/sender v0.54.0-rc.5/go.mod h1:dj51DKlSkf10tqsdfuE4NMZaD2PSzau3G8Le8UmXerA=
+github.com/DataDog/datadog-agent/pkg/logs/sources v0.54.0-rc.5 h1:wpvvYERlZsBmdoVMIYFxd2XlvSjBh5en06WkKgWRYk4=
+github.com/DataDog/datadog-agent/pkg/logs/sources v0.54.0-rc.5/go.mod h1:bBzxEwKqZRKlkizVUViWjGNkjjF58njl+YiZNLxD/ZI=
+github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.54.0-rc.5 h1:KuPBZTbwVJqDMa9ATfg0YWpGl3CxgGJcJZzSmsGVNdI=
+github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.54.0-rc.5/go.mod h1:xsyGZLKEr6dZaJlC9QWuyYVXqMnHRwZzn641O3bfB98=
+github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.54.0-rc.5 h1:GAai92l2sX1rOQqGzPzE0IGMzS442xTLrHF/EwTywnw=
+github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.54.0-rc.5/go.mod h1:TsFFZ6y2/brcnkXLZhfp0k2xrewdyqhXnjxVpkOWIvE=
+github.com/DataDog/datadog-agent/pkg/logs/util/testutils v0.54.0-rc.5 h1:9mAiLISlZcol07wCndWd5ErjItibOClx+N2Oyo3lG+A=
+github.com/DataDog/datadog-agent/pkg/logs/util/testutils v0.54.0-rc.5/go.mod h1:J5lpbKKJzxPE2i5dsBYJnxbHlFvzPkfxNlhh2p3s18s=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.54.0-rc.5 h1:qZg/3r8lLIgwzpHkqNBdnDKCdHexSxjgkb6FbPBaMnY=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.54.0-rc.5/go.mod h1:4/9D8y6pQo5a/Tg8GAQN8SaRIRWxxyl5QHzPRuu8D0k=
+github.com/DataDog/datadog-agent/pkg/proto v0.54.0-rc.5 h1:0HdqgTFchLs6ElI9cVrfjsT/gOF8A7PiY/VI2JMDFJc=
+github.com/DataDog/datadog-agent/pkg/proto v0.54.0-rc.5/go.mod h1:gHkSUTn6H6UEZQHY3XWBIGNjfI3Tdi0IxlrxIFBWDwU=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.54.0-rc.5 h1:wM6w1HtUOxrn/sgMW00jx+0p4rNzPv+NhPqmIpboyV0=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.54.0-rc.5/go.mod h1:3yFk56PJ57yS1GqI9HAsS4PSlAeGCC9RQA7jxKzYj6g=
+github.com/DataDog/datadog-agent/pkg/status/health v0.54.0-rc.5 h1:WgoyD4RkoGI3JDy+79SyiPc0AJGyCO5c2toOfxWwW1E=
+github.com/DataDog/datadog-agent/pkg/status/health v0.54.0-rc.5/go.mod h1:sNLVlyEtkYal/+kTj+mTp239DdwVuYBYOnpS90RKk5E=
+github.com/DataDog/datadog-agent/pkg/telemetry v0.54.0-rc.5 h1:iXZEgoYil4VaadjDsK+GbCkY7rBQyjsnlNJwXr6BwCY=
+github.com/DataDog/datadog-agent/pkg/telemetry v0.54.0-rc.5/go.mod h1:4GCwM87F7u9HzugAODVTwS05MFMKC+NoJu6E4Zd/8pY=
+github.com/DataDog/datadog-agent/pkg/trace v0.54.0-rc.5 h1:il5yFFe5CubI+bSfHolKr0077dQw0R+U5B9/iwW8aBg=
+github.com/DataDog/datadog-agent/pkg/trace v0.54.0-rc.5/go.mod h1:63uSQX6TVeJzl1cFbIp8MkXlF4kUMJs8iUfGhrSgUHY=
+github.com/DataDog/datadog-agent/pkg/util/backoff v0.54.0-rc.5 h1:eq+OaYTZ0rzQt1SpC2Tl+szJjUTX2nOC4N0glSzuWQ0=
+github.com/DataDog/datadog-agent/pkg/util/backoff v0.54.0-rc.5/go.mod h1:HcSwqoxWLfevi1vuDZuFeRHfSuHGakTN6/u42WbxQHE=
+github.com/DataDog/datadog-agent/pkg/util/cgroups v0.54.0-rc.5 h1:z5NnMMxKxO+cExUt0QCCSF7E+wX8LdD3nuL9LlDpR3Y=
+github.com/DataDog/datadog-agent/pkg/util/cgroups v0.54.0-rc.5/go.mod h1:40rjMMqmvglVjktjWVUIk3o5W2zAtek0JU4FCsf7bsM=
+github.com/DataDog/datadog-agent/pkg/util/executable v0.54.0-rc.5 h1:kxGwRdBPbiEx9mplVMA/rdLKMWcaKJMqO1pIyg56sLk=
+github.com/DataDog/datadog-agent/pkg/util/executable v0.54.0-rc.5/go.mod h1:hUJvBzOKfasYzSvlar10ZkrB8I1CR8PhtyGxmlMv/cU=
+github.com/DataDog/datadog-agent/pkg/util/filesystem v0.54.0-rc.5 h1:2A1/eg6zwNlUOjdAgaSgXR8eFR0HYLyqpvqLjlxKEP4=
+github.com/DataDog/datadog-agent/pkg/util/filesystem v0.54.0-rc.5/go.mod h1:DfBDL+EjOiLI+HYR2N5zw1an1q7oCJQT6lY4oTCZxZ4=
+github.com/DataDog/datadog-agent/pkg/util/fxutil v0.54.0-rc.5 h1:Fv7vJj/7qoEIV+ppI257ncx8IVQkDrUyjKSyeE56oTs=
+github.com/DataDog/datadog-agent/pkg/util/fxutil v0.54.0-rc.5/go.mod h1:xqWrlGtjVu2ZMoCW9e3R7f13/Qg0IQBzO5AX7J1dC9I=
+github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.54.0-rc.5 h1:1buVvWK8DXUveIpZMDc7Rn59mCDWZkQ9OGHjQRKdcas=
+github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.54.0-rc.5/go.mod h1:FqMWIncd1TJJdlUYyiQQGJHWe+ZDKAjJSSPlrUBvHHk=
+github.com/DataDog/datadog-agent/pkg/util/http v0.54.0-rc.5 h1:81sBly4SXW5Bvt3DHiUuuLpQnEUjhgdVDK69bk0g+58=
+github.com/DataDog/datadog-agent/pkg/util/http v0.54.0-rc.5/go.mod h1:hZRz30Xaqm+UiR/KjO6gAo1Qa7v+tsxVqqaWgUkBjxE=
+github.com/DataDog/datadog-agent/pkg/util/log v0.54.0-rc.5 h1:BcA7+MK04/HRCFlX5Pn/sQzZu/gF4qA3DOF880Jy/oo=
+github.com/DataDog/datadog-agent/pkg/util/log v0.54.0-rc.5/go.mod h1:MFVWM0DMaxEXhCnHLWnh6axtwubmKRn7C+w2xiL+atI=
+github.com/DataDog/datadog-agent/pkg/util/optional v0.54.0-rc.5 h1:3+7PwCbvtkOf63fPOzzMGVxNzlaZofPAg6ExPKn72hc=
+github.com/DataDog/datadog-agent/pkg/util/optional v0.54.0-rc.5/go.mod h1:wa1JIWnzo9EvOBwzDOeff8HeqgOq9JQANRb99/nKg3A=
+github.com/DataDog/datadog-agent/pkg/util/pointer v0.54.0-rc.5 h1:3zfbAoA9HSCJ/RsR/kP2MzntIvoI4szoReu4j3gpJrY=
+github.com/DataDog/datadog-agent/pkg/util/pointer v0.54.0-rc.5/go.mod h1:ahpEhdpkifOLy0/XLHas+SseEmMQnwMgV1CMVkSuXL0=
+github.com/DataDog/datadog-agent/pkg/util/scrubber v0.54.0-rc.5 h1:Sob3M0CylnKxZ9FX8Ewjka5VRFYZUQLN2J0kyOGYnMM=
+github.com/DataDog/datadog-agent/pkg/util/scrubber v0.54.0-rc.5/go.mod h1:8ASCNWHQtcmUedxL+WjbCPSIcGIM8LeVzil7JCzx0js=
+github.com/DataDog/datadog-agent/pkg/util/startstop v0.54.0-rc.5 h1:wo+LiNCZ9RQNNuYzCavbgjARHO3OmcqpFw4Mo6J5R+s=
+github.com/DataDog/datadog-agent/pkg/util/startstop v0.54.0-rc.5/go.mod h1:F9ysDtw4BnbZIuKdwOdiHranjG9QMv2o7PMVreIhFzI=
+github.com/DataDog/datadog-agent/pkg/util/statstracker v0.54.0-rc.5 h1:hU+Zvh4IL/HOEAjDzyIhYMQa4ibbX+dM4p5wWWpZ34U=
+github.com/DataDog/datadog-agent/pkg/util/statstracker v0.54.0-rc.5/go.mod h1:KkiJi2f4mcxAJA00ArYw4zTfs0oWMU1bNXqKhymRVZg=
+github.com/DataDog/datadog-agent/pkg/util/system v0.54.0-rc.5 h1:b7GZ1xAbkCtw5/hHwkRJoRiheAd9e+T2slJlD34lXbE=
+github.com/DataDog/datadog-agent/pkg/util/system v0.54.0-rc.5/go.mod h1:gIg9d09X6uFqDFo7Dd48OCOCAtTLTWIE44+f/e35C3c=
+github.com/DataDog/datadog-agent/pkg/util/system/socket v0.54.0-rc.5 h1:alEMYfYHYju+poG90phbdrpg4RYEyK/oFLAi8MM++HU=
+github.com/DataDog/datadog-agent/pkg/util/system/socket v0.54.0-rc.5/go.mod h1:RKqan/bns7z9tqZBWSkshY1e6VisZJaU0b/XQcIDyAQ=
+github.com/DataDog/datadog-agent/pkg/util/testutil v0.54.0-rc.5 h1:+nmZ4SFAHoskytj/q8+ewx7UtvbKGdDzcbPU0rhxIzU=
+github.com/DataDog/datadog-agent/pkg/util/testutil v0.54.0-rc.5/go.mod h1:AyiUcueiQeqBXn60wbVOo3dPYRDNg0Gsfh/GkhatyxE=
+github.com/DataDog/datadog-agent/pkg/util/winutil v0.54.0-rc.5 h1:0eWM/VJT1YPFf+nE5u0QdX1CiCiH2LWuvr4cCJ+nC3M=
+github.com/DataDog/datadog-agent/pkg/util/winutil v0.54.0-rc.5/go.mod h1:m/wWejBoF6cuPzogMq2sgm86vvsnR1bAkw+Fgxq4vns=
+github.com/DataDog/datadog-agent/pkg/version v0.54.0-rc.5 h1:aQEEvgBxk6Vk6ePMldw3fQUYTrl0AaYfUbrT1Ko6bjE=
+github.com/DataDog/datadog-agent/pkg/version v0.54.0-rc.5/go.mod h1:EHeMJExE74SMdRoRCEuLOxbRgjfQwCVngMqFBiz1VN8=
+github.com/DataDog/datadog-api-client-go/v2 v2.26.0 h1:bZr0hu+hx8L91+yU5EGw8wK3FlCVEIashpx+cylWsf0=
+github.com/DataDog/datadog-api-client-go/v2 v2.26.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
+github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240419161837-f1b2f553edfe h1:efzxujZ7VHWFxjmWjcJyUEpPrN8qdiZPYb+dBw547Wo=
+github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240419161837-f1b2f553edfe/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc=
github.com/DataDog/go-sqllexer v0.0.9 h1:Cx2Cu1S0hfj4coCCA8hzjM9+UNFRkcu1avIV//RU5Qw=
github.com/DataDog/go-sqllexer v0.0.9/go.mod h1:nB4Ea2YNsqMwtbWMc4Fm/oP98IIrSPapqwOwPioMspY=
-github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I=
-github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
+github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
+github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k=
github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee/go.mod h1:nTot/Iy0kW16bXgXr6blEc8gFeAS7vTqYlhAxh+dbc0=
-github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.13.4 h1:PTIZJAsfnr2XLB3V3duL+mSbZvYV8G3XQ9e57KyOuOA=
-github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.13.4/go.mod h1:5yS6i90MEZS3NG+o7PrQQ8i7OaEMzwQvn4bCZ9h9Rao=
-github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.13.4 h1:B2R7Wh791uphpltmorbvvdKk0rJOhoExwM4NnE7hXTg=
-github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.13.4/go.mod h1:Tk2wwdBgWeSvDPtrGGyym8CdVWSuphiToGc/tRvFoNQ=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0 h1:10TPqpTlIkmDPFWVIEZ4ZX3rWrCrx3rEoeoAooZr6LM=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.13.4 h1:dU9oPMbAr/wYMNdzhu0pxIhAJOn1Btj1T3ZSyY6RvY0=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.13.4/go.mod h1:ud/Xr5TWUopcaGHdkh9RN8lhnCAFa95X16Rb5mrkE18=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.13.4 h1:ZuYc5Ql/GSyAEMgpKkbQiHOtIHkEpYujITW1wIVFgLE=
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.13.4/go.mod h1:YSntkplFoUM1mepnPeJFsoblqD0Wdi+Avg1/d1upgZQ=
-github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.13.4 h1:t/Zh8ztLDBOMRMiuVNFthWMMG5F4POgc5M6+Y3DTX9g=
-github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.13.4/go.mod h1:JfdBBDreQfbIN5FYrAtZV3VmQMXnf9o4tMbaMxWqe40=
-github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8=
-github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0=
+github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.16.1 h1:WD5hPS/KfDaYcWxa636972pQ8iOBN9MC/6Is5aSNZc8=
+github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.16.1/go.mod h1:P/l++2cDCeeq21KSmCEdXdMH9/WMdXP7uA/vjnxhtz8=
+github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.16.0 h1:VJT1Jjlz/ca999FEqaAS+He7S4eB14a+PJjczgRdgAY=
+github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.16.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 h1:px2+7svK86oeCGd+sT1x/9f0pqIJdApGFnWI0AOPXwA=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1/go.mod h1:+LijQ2LdlocAQ4WB+7KsoIGe90bfogkRslubd9swVow=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.16.1 h1:Qm3M3A7/tkYG8JYeF9Mxp3oNEz23EQzvnV0MZR8mJKQ=
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.16.1/go.mod h1:HPwVoMccTFfyV94bE29ffwAYnsPykAH7Iso8/5ucLSs=
+github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.16.1 h1:sQoxh5RytdEFYFLGw3JUwFFhS/A1InFFVldpWAS/Z3g=
+github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.16.1/go.mod h1:6eYyd+lJYH+uRuZqhyW/u+9ykaXBWetDGj44+txz6jU=
+github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE=
+github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg=
+github.com/DataDog/viper v1.13.3 h1:0++798wZLEenL1JbF8dMmwFl6WMlAVxLtzVuws/LlVc=
+github.com/DataDog/viper v1.13.3/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.22.0 h1:PWcDbDjrcT/ZHLn4Bc/FuglaZZVPP8bWO/YRmJBbe38=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.22.0/go.mod h1:XEK/YHYsi+Wk2Bk1+zi/he+gjRfDWtoIZEZwuwcYjhk=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0 h1:yRhWveg9NbJcJYoJL4FoSauT2dxnt4N9MIAJ7tvU/mQ=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
-github.com/IBM/sarama v1.43.1 h1:Z5uz65Px7f4DhI/jQqEm/tV9t8aU+JUdTyW/K/fCXpA=
-github.com/IBM/sarama v1.43.1/go.mod h1:GG5q1RURtDNPz8xxJs3mgX6Ytak8Z9eLhAkJPObe2xE=
+github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw=
+github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM=
@@ -138,10 +243,12 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs=
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI=
github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
@@ -149,58 +256,60 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.51.17 h1:Cfa40lCdjv9OxC3X1Ks3a6O1Tu3gOANSyKHOSw/zuWU=
-github.com/aws/aws-sdk-go v1.51.17/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.53.11 h1:KcmduYvX15rRqt4ZU/7jKkmDxU/G87LJ9MUI0yQJh00=
+github.com/aws/aws-sdk-go v1.53.11/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
-github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
-github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo=
+github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
-github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA=
-github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE=
+github.com/aws/aws-sdk-go-v2/config v1.27.16 h1:knpCuH7laFVGYTNd99Ns5t+8PuRjDn4HnnZK48csipM=
+github.com/aws/aws-sdk-go-v2/config v1.27.16/go.mod h1:vutqgRhDUktwSge3hrC3nkuirzkJ4E/mLj5GvI0BQas=
github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.16 h1:7d2QxY83uYl0l58ceyiSpxg9bSbStqBC6BeEeHEchwo=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.16/go.mod h1:Ae6li/6Yc6eMzysRL2BXlPYvnrLLBg3D11/AmOjw50k=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 h1:dQLK4TjtnlRGb0czOht2CevZ5l6RSyRWAnKeGd7VAFE=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3/go.mod h1:TL79f2P6+8Q7dTsILpiVST+AL9lkF6PPGI167Ny0Cjw=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 h1:lf/8VTF2cM+N4SLzaYJERKEWAXq8MOMpZfU6wEPWsPk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7/go.mod h1:4SjkU7QiqK2M9oozyMzfZ/23LmUY+h3oFqhdeP5OMiI=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 h1:4OYVp0705xu8yjdyoWix0r9wPIRXnIzzOoUpQVHIJ/g=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7/go.mod h1:vd7ESTEvI76T2Na050gODNmNU7+OyKrIKroYTu4ABiI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 h1:/FUtT3xsoHO3cfh+I/kCbcMCN98QZRsiFet/V8QkWSs=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7/go.mod h1:MaCAgWpGooQoCWZnMur97rGn5dp350w2+CeiV5406wE=
github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 h1:UXqEWQI0n+q0QixzU0yUUQBZXRd5037qdInTIHFTl98=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9/go.mod h1:xP6Gq6fzGZT8w/ZN+XvGMZ2RU1LeEs7b2yUP5DN8NY4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o=
-github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.5 h1:a3nFS1TFNTH9TVizItnHz3BgPCk5/7ygrZQZAoUV3GA=
-github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.5/go.mod h1:3pzLFJnbjkymz6RdZ963DuvMR9rzrKMXrlbteSk4Sxc=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 h1:Wx0rlZoEJR7JwlSZcHnEa7CNjrSIyVxMFWGAaXy4fJY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9/go.mod h1:aVMHdE0aHO3v+f/iw01fmXV/5DbfQ3Bi9nN7nd9bE9Y=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 h1:uO5XR6QGBcmPyo2gxofYJLFkcVQ4izOoGDNenlZhTEk=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7/go.mod h1:feeeAYfAcwTReM6vbwjEyDmiGho+YgBhaFULuXDW8kc=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3 h1:57NtjG+WLims0TxIQbjTqebZUKDM03DfM11ANAekW0s=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3/go.mod h1:739CllldowZiPPsDFcJHNF4FXrVxaSGVnZ9Ez9Iz9hc=
+github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.10 h1:MNECBvcQiQxwBsVwZKShXRc1mrYawtj39jIxPXWeAQY=
+github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.10/go.mod h1:/tT3hQYAj8aGFmy4hYqeR8I5R1uFVaIlHwj6jNU+ohs=
github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 h1:aD7AGQhvPuAxlSUfo0CWU7s6FpkbyykMhGYMvlqTjVs=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.9/go.mod h1:c1qtZUWtygI6ZdvKppzCSXsDOq5I4luJPZ0Ud3juFCA=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 h1:Pav5q3cA260Zqez42T9UhIlsd9QeypszRPwC9LdSSsQ=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3/go.mod h1:9lmoVDVLz/yUZwLaQ676TK02fhCu4+PgRSmMaKR1ozk=
github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 h1:69tpbPED7jKPyzMcrwSvhWcJ9bPnZsZs18NT40JwM0g=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.10/go.mod h1:0Aqn1MnEuitqfsCNyKsdKLhDUOr4txD/g19EfiUqgws=
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -210,11 +319,14 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I=
github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A=
+github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -233,8 +345,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
+github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
+github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
@@ -245,10 +357,18 @@ github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtO
github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
@@ -259,9 +379,11 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU=
github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
@@ -305,21 +427,21 @@ github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCv
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro=
github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
-github.com/expr-lang/expr v1.16.3 h1:NLldf786GffptcXNxxJx5dQ+FzeWDKChBDqOOwyK8to=
-github.com/expr-lang/expr v1.16.3/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ=
+github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI=
+github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
-github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -367,8 +489,8 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
-github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
-github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
+github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
+github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
@@ -387,11 +509,14 @@ github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -484,30 +609,38 @@ github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk=
github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
-github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
-github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
+github.com/hashicorp/consul/api v1.28.3 h1:IE06LST/knnCQ+cxcvzyXRF/DetkgGhJoaOFd4l9xkk=
+github.com/hashicorp/consul/api v1.28.3/go.mod h1:7AGcUFu28HkgOKD/GmsIGIFzRTmN0L02AE9Thsr2OhU=
+github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg=
+github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
-github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8=
-github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A=
+github.com/hashicorp/consul/sdk v0.15.0 h1:2qK9nDrr4tiJKRoxPGhm6B7xJjLVIQqkjiab2M4aKjU=
+github.com/hashicorp/consul/sdk v0.15.0/go.mod h1:r/OmRRPbHOe0yxNahLw7G9x5WG17E1BIECMtCjcPSNo=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -551,8 +684,8 @@ github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
@@ -575,6 +708,8 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI
github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ=
+github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928=
github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
@@ -589,12 +724,10 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 h1:2r2WiFeAwiJ/uyx1qIKnV1L4C9w/2V8ehlbJY4gjFaM=
-github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4/go.mod h1:1yEQhaLb/cETXCqQmdh7lDjupNAReO7c83AHyK2dJ48=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
-github.com/jaegertracing/jaeger v1.55.0 h1:IJHzKb2B9EYQyKlE7VSoKzNP3emHeqZWnWrKj+kYzzs=
-github.com/jaegertracing/jaeger v1.55.0/go.mod h1:S884Mz8H+iGI8Ealq6sM9QzSOeU6P+nbFkYw7uww8CI=
+github.com/jaegertracing/jaeger v1.57.0 h1:3wDtUUPs6NRYH7+d+y8MilDkLHdpPrVlQ2wbcsA62bs=
+github.com/jaegertracing/jaeger v1.57.0/go.mod h1:p/1fxIU9hKHl7qEhKC72p2ZYVhvvZvNB73y6V7YyuTs=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
@@ -614,6 +747,7 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
@@ -626,10 +760,15 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=
github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
@@ -654,17 +793,20 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg=
-github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
+github.com/leodido/go-syslog/v4 v4.1.0 h1:Wsl194qyWXr7V6DrGWC3xmxA9Ra6XgWO+toNt2fmCaI=
+github.com/leodido/go-syslog/v4 v4.1.0/go.mod h1:eJ8rUfDN5OS6dOkCOBYlg2a+hbAg6pJa99QXXgMrd98=
+github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39z1RhZ5dc4y4r/4koJo6IYFgTRMe/LlwRTEw0=
+github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
github.com/leoluk/perflib_exporter v0.2.1 h1:/3/ut1k/jFt5p4ypjLZKDHDqlXAK6ERZPVWtwdI389I=
github.com/leoluk/perflib_exporter v0.2.1/go.mod h1:MinSWm88jguXFFrGsP56PtleUb4Qtm4tNRH/wXNXRTI=
github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4=
github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs=
-github.com/linode/linodego v1.30.0 h1:6HJli+LX7NGu+Sne2G+ux790EkVOWOV/SR4mK3jcs6k=
-github.com/linode/linodego v1.30.0/go.mod h1:/46h/XpmWi//oSA92GX2p3FIxb8HbX7grslPPQalR2o=
+github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw=
+github.com/linode/linodego v1.33.0/go.mod h1:dSJJgIwqZCF5wnpuC6w5cyIbRtcexAm7uVvuJopGB40=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY=
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -686,8 +828,8 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM=
@@ -731,6 +873,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mostynb/go-grpc-compression v1.2.2 h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI=
@@ -759,172 +903,172 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.98.0 h1:BygHoE2QajT7PX/UH3dzxNV2frTc9t8+E0lO84N+eBc=
-github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.98.0/go.mod h1:B9nDiPwMAO1k5pFErQhrylE3srkVzLxaTaXWR3Juiwc=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.98.0 h1:AdTQTihowXT+BCtJKz6tYY6QaUf6yirSxIbftIC5Z74=
-github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.98.0/go.mod h1:FCuVdzwR+P15oG18/SjzqIUJGy97sXrXOjCiH6ZMvQs=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.98.0 h1:yNW1HxBL6LbJjhgnNK6RVl6L14O75XOSQ3UyPxmcMtE=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.98.0/go.mod h1:6TMsidQaQ/KEMC3LWWmiVNdNohDv57MTmdvN+0P1og0=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.98.0 h1:WlOCvQTSWObq6yL7/gsCDDbfZN5UUtelnp/bFHpfGU0=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.98.0/go.mod h1:JINul7uL/EQWAT0IHPXf7LQSQHVOjmfUaDB19rzlhAM=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.98.0 h1:WySeirGDSlAJtFX9fvbvFbWPKJHMje0zp2UQBnXZVvY=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.98.0/go.mod h1:Kn10O9ShLjHsRPmfMFvoxAUpD4ha0gIt9hA85JSnS5Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.98.0 h1:c4SyYvq2jk94j6SQBYconfrcfCRQVR1RVxnLO9b/ut8=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.98.0/go.mod h1:NOc8sJP2zWgG/ARCUX0rj62Tp9fjh7rBe3f7+S6wJKE=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.98.0 h1:eD48RXTOTI4mUqwmMbifd2xygNPFQk2+Q+h79tMKgRo=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.98.0/go.mod h1:iUXj76MTvLxgCJX6MU5d44bDSJJDkhZyKWTgSqt3DQw=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.98.0 h1:qqbNZig9IqIed6mj9FUJVWabiP+mxY09vF+aW/hX2cU=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.98.0/go.mod h1:hYMt6wWecJJF60oWxRvXgA2LU207PwhMEvVg+/yxBvE=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.98.0 h1:r2hKk2PEwWpQ4upx75jvR0nNbMkMO+EYpL2mByPgV0A=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.98.0/go.mod h1:ziO8zfvZyuWHsvf6FfeB7XWnC85t0cYPfjGHdITPoZU=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.98.0 h1:gkULzydPVCBE4bx4qtvH882QhnDnSaN8PTZjRjkgY/E=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.98.0/go.mod h1:ZPy2cULCck//f/07BUR2mn69P6UcZPZHbY/roEcehtg=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.98.0 h1:9iGIQX91RY84Ubv3AoLxnKPINlbBBEIwkbWWBudR2FA=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.98.0/go.mod h1:Xo12+Z5wg2yJWaoRVesZfFSyBX9r46d82rzEdPhMpkY=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.98.0 h1:PvTmyr1MOFwlKdEqHDKEwoOSLINTiEppcvzp6a2jsFQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.98.0/go.mod h1:fxMPjSrU2yhl0wcc+aBgv1F6brf6A4t2IM/IT1PwLZ0=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.98.0 h1:pYGU+q8T+uUJYZblgtufWcLTYff6Md7kGCJ8YgC9UU8=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.98.0/go.mod h1:OVLmVtFMIEQjHMm1ygJ6dpGYhNxS6AEwhyc39Y0YPTU=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.98.0 h1:I1CrHkeifkWhGVZe7LMzM/sICHC193lbxd8J13ouXos=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.98.0/go.mod h1:rGIasss6gF8lmWZAmWhsKGcQeECTt7OomMeIopFzkO4=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.98.0 h1:Ad9kF7wfdZWJacgtzguVEFvBcc8vthMzIlnMIqhOvKo=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.98.0/go.mod h1:6bivBlhIFVflEYm4mMotqDJ54Z158LAActxNNd4KBGs=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding v0.98.0 h1:GNDpMm6OqCTe7r1PFyDhm4CXQTqYPrnA3FEPt6ZkKzY=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding v0.98.0/go.mod h1:xhqjUKyIvVPljVb4gEsf4pQkrgzUx0H0nBuFrWvRAck=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding/otlpencodingextension v0.98.0 h1:r3lno2DU3JXFlpGmzqylRboJRWAsuGDuvilHur+IYtM=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding/otlpencodingextension v0.98.0/go.mod h1:mS0zmU3ku1ePnCjUmKyf05L5zXOtC5ijtxykyC5nFm8=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.98.0 h1:96d2bgJfwjyX7oisKJjYPPUnhdKVK4mss/mL9f3nw2g=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.98.0/go.mod h1:DQBGDiuwOOmIXIxgWMRiK0Qu7XbAyHO8OEDsjIWEoNk=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.98.0 h1:ieapeK22hUQ2Nonk1dScGkz/YD8c/evZXIvd7BJqA4s=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.98.0/go.mod h1:RIXHU93Zd/EDOySfV6J0V8UU5ej+3ZsyZ0pT0G4XJpI=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.98.0 h1:KlG/2ShapW8OzD0dnWOyUHbtcYARmhHO3vw9x92XKvE=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.98.0/go.mod h1:b5y3WDWK6mqTL6U6eGqDqK2F3O9GncmvtsWmQ03UoVk=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.98.0 h1:iyyYN8Q2YSmdnbbOkymYVC/dLZxiwJ4ICGyBJcpDqQc=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.98.0/go.mod h1:EFyiEYZmYvQpcK9hND69yfjP40E/BEVis3NAHwnw4p8=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.98.0 h1:yend0fdg/ejfVSFOCI8CLo5ikkNhSl41Zs6ma5jUZ4c=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.98.0/go.mod h1:yGkFJl78686wAA5235HdLLQrWlOxuNqnZzQMUz2I7Ac=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.98.0 h1:+hjx5T+FMOyKChkpY03oSUCw3rALupeXFCkbS0Lu4OY=
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.98.0/go.mod h1:Y4WKKdXboSG7HuI+Xf0dK8AZTAJKt+XKTvcYiwMwEEw=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.98.0 h1:qmiMPH530Bf16TWjDjvhm+SEAKWb1tPfqdzObNqlNsY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.98.0/go.mod h1:V235cPM6ngKN9qobDKrYLmWQrJ1WTDL8L0Rx7R/4xtc=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.98.0 h1:dFYXcpV9a+eOnrJHcRptwxfD6UNDSk3O+yTmQJM2tvA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.98.0/go.mod h1:EW3A0XWGehjQfN6cw17uoDtYqd5SqsXDNfac9WAj8oU=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.98.0 h1:4DP7lHRWCpvPragqqfgaJVeZAkofyri+S6H7IRfRkls=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.98.0/go.mod h1:dkG+z2jMW6p8HgiZauQwJLgeTxQzwa/I1qoeSx0U3wI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.98.0 h1:4RD0elfzuoOxrBpekmg94JmIJjL8MZIayIUBLva+P44=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.98.0/go.mod h1:BnCYHF+EfdRTriFxwaaA9SuGV7Nri8WkZLXp+8keTg8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.98.0 h1:EFOeCNi4Llbqg4BcnM/QHK44AAQGQJGJ7UT0sg0hQxI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.98.0/go.mod h1:cHK6tQZEUnhuElYlgdC62Sm93AcDbaP2um5dO8rsZ0w=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.98.0 h1:gNh2x7mHzpRL1+tpj3n30L1UswcsVen4gyhBWHH3+so=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.98.0/go.mod h1:dPJAgUxoGBqXFPsW0W6rxp43MQi+DZFnPBhYZpT2rIk=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.98.0 h1:JqBTRDU+LCKEekwgg2k3yLYnlfBqtEwkDO6V3w+ezNc=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.98.0/go.mod h1:8J5p03NsrMY8SIjF+8iaOj3NlYEH0gKcz/onmCnhckU=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.98.0 h1:uoXA78WpoXI1n+dexldxSFqe2VvLnz6TXNS77nCjQhQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.98.0/go.mod h1:/PGqMAGLWqyfqgaoXILd+shRDhmdraBsxw5zGeDjyjg=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.98.0 h1:rbfZJ4YA82WSC9HW2jgmY1UZEsOYte8VTjrBFU6dv5Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.98.0/go.mod h1:KOTp7iN8sWTmqk/SxbMqvM2KXUdgXRWb6hI84E5arLY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.98.0 h1:FaldDCQ6hpPAauYZ1kbNWkTFU2vRgL/nr5UY8d2jrT4=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.98.0/go.mod h1:0arlQ0mj/VhcFFSKHDmIc+iieHweXKENSBcqNnAY8OA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog v0.98.0 h1:02ufmmbRDPh7GiupBzcYaLtkFFc0/RmTIHnfzf4jbpg=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog v0.98.0/go.mod h1:H8ouJ5zqL//b19YqRr8iO4nDmIqfhMWyyY37zWxaLqo=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.98.0 h1:7YqNP644dJRxMcOIJe9GnlBHhpMH8+po3o8vYvQLqb0=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.98.0/go.mod h1:a4PlGMPWY/9zlkqLJ5euyRqg73E0IgBARHhR0GqZLQU=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.98.0 h1:KHO7ptmWdDW1wi0oiDzLNEDyXDG9TFsK/N6LTgyL6JI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.98.0/go.mod h1:EwaZ9Bj0+7l5roLUkdKIH1pHXdfHCGve/8mZTf5Hzxk=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.98.0 h1:i5+XkPXUFqbNnOYngPq1b7nZ1PyGdAtLwcsEGHJ79yw=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.98.0/go.mod h1:JRRvo6HJ8jtHzHA2/H4+bIZsO5M8gpSXDLZ06fHN7II=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.98.0 h1:wRD+b1q1n6lSzX7HsydCtSyqL2HqW0miyWOw87rI+L8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.98.0/go.mod h1:1OsbednOq+I/HsWwIhJ8J6Glv8JhWiNNPm9ReVq9Nfk=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.98.0 h1:G4VY01P5r26yAM+ruHnLBTpue1naxBHSSCEYbMzbbUo=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.98.0/go.mod h1:eY3VRXSzUIUSyjsxuKdY6XTzV0oYlge0MMLw7ijd/RM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.98.0 h1:zDTqAb2pGvqPZLTdKPN0DZqEv5p9X/Hzin+3/ITaVgo=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.98.0/go.mod h1:0rlrjq1QNcrBoMsBSBpJzQZ7o1ptgn+r4bc+nw3KCCA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.98.0 h1:orhX9ZGjvIUlW1Pesk/q6KVsKZ1S3Ql6KOS+wCJwMDI=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.98.0/go.mod h1:jzd6HhS9Av55YlRctGWG/Jk/rATPYRdVCiOZLlQnDxM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.98.0 h1:F6ZLrCnfX4JmSQaqfpHY3k7dLjAmXOoYdWbfVkbpT/s=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.98.0/go.mod h1:jdQtG3kZnRRZ6lv/Q3iBINkMN8BfZStsLhG08okP19M=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.98.0 h1:oRra5wW04p+Q/bjxeHww5TM0G5Eo/zNUPplk9LK2S/g=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.98.0/go.mod h1:AIngTy08aFnbn6MJvn4x1ofYr5b62eQQiMk2m2u89NU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.98.0 h1:32rvTFzoSPdNcFCo1U2x7ohdHSW8L34RaXDlIMFz7qg=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.98.0/go.mod h1:9hTznk6RjG/mvtmTdSeXSVivcsLBKw6WfPJRJX2Wzn0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.98.0 h1:3JjVzPccHi6r4XPgzH5YIi0/CGHbqVJWADtYi46GfIY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.98.0/go.mod h1:90/iHUjO/6DGVa161GDsdbntj1r06DRS8ZQBvw+PNOY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.98.0 h1:6uBxGiwJPCmqhEKKdFbA0ECjM7vaQA+vBO/KQg30ed8=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.98.0/go.mod h1:8IUWugptZJY7den7B0MQk5K8k1SaSaY+kHfBTvBO3SQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.98.0 h1:Y4UbYtpt+AsCVj4W8HKxLSlKNrMLhxypmA34GkwjJGE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.98.0/go.mod h1:a1eqRzzePACd9YamtiY0+42P4dn1pB0Ozc0B3P6JGMs=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.98.0 h1:vgVVJgTRQzgkIf8iODqKiS5VoMUyUPHRQjAUMOeLJt8=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.98.0/go.mod h1:5RtSFx/r557j1/Sy8+MO+N0ulfEzDwSNwEKo7bdHvYQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.98.0 h1:lZGvpoHCYoEFBDLhnKGGAgGJyX2l2jGZUgC1kTz/esI=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.98.0/go.mod h1:fOHCXaWnVcKJPY9Jxeoyl7tSuuhZ/PPN92ib8eyjSLQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.98.0 h1:F1Vnzkhg0Trncmvw65Mq1wBKPgX2wI3aGE90J7CXhuU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.98.0/go.mod h1:yybbWtN2RnnIoqw3KzboFjk4iYpEBXPzHQvARs+eO9U=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.98.0 h1:Ml4/JEqJeJknFMiXW5AxtrejrbGXyocRq/BfCCLS5jA=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.98.0/go.mod h1:DjiZ//9SFD9if4d/Q7dFam/4etFiXFpkxZ3kGM7XKmE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.98.0 h1:R4DL+SLiIs0ChV8nGAyTBngsSiyzTbu9/qn9qJwP0K4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.98.0/go.mod h1:zhLhabEO8mr1PY65YZuJuFJAbN0938xhi3bVKD8fbnQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.98.0 h1:jINjBgpWO/rU6RKzeHzfJAJCTfqGJcBGBgjwwTiqZBE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.98.0/go.mod h1:3zw5nWtjNyojk1rOPfJ3TTNNPfRUqEUgJUbytsS3f5c=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.98.0 h1:lSP0bTkEMMFa5DeA0QLwgxA7zAIbq2NeLRioAS4qKVE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.98.0/go.mod h1:ErAJvPIFFMeb9nL1ePsYbIK18BaVyitIaz3zAlA+msY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.98.0 h1:dbgJR93JNl/XKbwHwGzISTk0owYODddoDHRIoimpehs=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.98.0/go.mod h1:FjFQFT6tfC7Gac53GC/vorbTwvR/UzRrGh/rgSQsCB4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.98.0 h1:pM4puW3v2E+kfvuxz9L3bqGXbg/l6skLYVyZE3ksI0Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.98.0/go.mod h1:tdgLPheVJOpy9Gic113d7F9+a4S/slFV5OxJAiIpGSk=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.98.0 h1:lx4aKpYWxDc8lFoLv93r4MjalQujua/2E95cZTQRUvc=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.98.0/go.mod h1:RVV7i6YZ4j9Jb0J77znceLRgY2VJQxtdp+Mg9CX6fdg=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.98.0 h1:SRFZwjtkjlh4thYckLGPyKnlJPxWhpYAdMlwrT7iho0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.98.0/go.mod h1:CgCTyjyZCHfm+nci5W1gqL55imJQXjuETC5fzg54Pus=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.98.0 h1:xn3jwhEm9tWaOjyPH3OwVH032AmKj2UshkeenmKeVQE=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.98.0/go.mod h1:e6bjGXih8jmPZ+h6K5HDE7xFwZv4r+mKiIvqKTHAcgk=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.98.0 h1:F93HvVWJSEUtiwtyqXicjBiq3PhXxVjvX59VJtRuZL0=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.98.0/go.mod h1:vUObVWshD4NKy4CzZpcC8V/avUyFD6WmIwOIaux7oDA=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.98.0 h1:g0EYXHvXQDtex9f2ktNDyUM7fJpaBSWvXSAVFTWywPM=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.98.0/go.mod h1:yzebFLp50w9NL+waozYKJSQKaScjJwqKIP/vorZIWe0=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.98.0 h1:+o1Jm65Yp8Z/EXHwimEfWQlRrnYNpB0ITAH9TFlY96c=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.98.0/go.mod h1:dHnrsXrapzrkj/G5zIYdavY/5d+n1J9qeYXKrepLYf4=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.98.0 h1:oP6FVYInPiAGLW86jsSdVrx8p+X99VbvIgJuY+7tB8E=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.98.0/go.mod h1:yO4iG0XSOS1izfupSqxXubc79WGCrakcvOJdLt5d+iA=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.98.0 h1:Uy8ujg6omBqlotu+rQwM3ysUOBvoCnDghHVr5GqiEvA=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.98.0/go.mod h1:vXQywhIh5jM1/3A1+9jBE8zlzK7Ihj4wzpkKpbAIfBQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.98.0 h1:kW1Ud+M2aipcMtlr6E3FMJIgyM8/DUVefNT6boiZkTg=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.98.0/go.mod h1:AeVJbTS/xLWowVMojvpfRQhsjZwFEiMlANp6IRXterg=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.98.0 h1:Q+NzXfH9LTNupFpUdXFG/q3rka/Hv7lutBX5cjiPYmg=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.98.0/go.mod h1:9G5dg3SYuipPocclXv4U87hQ1/B2T4ca7lzUOLSReko=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.98.0 h1:yWVlov2DodcGqpXU9fwzZcihUdMQkwhl49Vvegcrp7M=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.98.0/go.mod h1:OTUeSFD5shvS4g6ftKfZTBjDNeEwhNST1bX4vTwsKpE=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.98.0 h1:DBCsfHZJt6zEL6aBcZZhHSnKjcLcBbI1c9IAPK1/aLg=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.98.0/go.mod h1:khpK0DS7FJzQmRo2alCBFK4pa2bV9NPLQTLqfx43REI=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.98.0 h1:+xPdlUjZiMTRrZK059U8zPP/IlhYoDt8jT+WpdT+9WE=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.98.0/go.mod h1:8+Kko3psy8Wmkc1q8dpnxzaw9ZbzYHeFovb8ozyr5FM=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.98.0 h1:ldjSfqmbmiv0MuUdp3ey/pLxo9fPyLNW7qozl/EtcHQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.98.0/go.mod h1:7m75cmrBbgROWDE+CSg6gB4qtPTGEt0kEMiG8hHaVA8=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.98.0 h1:t2PT83LU0H3eb25/t5oQNq8DqmykcqI854qH+yh5wrg=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.98.0/go.mod h1:3DHDCqjAfepIqJlTTyUQy3yNaS9Ax6lyGt7v7AUeRwc=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.98.0 h1:ljdy8h+V69mjx4X0Jbu4nt0FbeXa8h53ogie6OIK2zg=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.98.0/go.mod h1:iz/isMSPjHCFKiS9twzsfBMwy1j7p4fAxLSL47mf7zI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.98.0 h1:03JK/KqkDm74hR+As652i44JaVxF9lKXs3bq2G1Ao7g=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.98.0/go.mod h1:RvXJkNKhI4Sk1nAbHDR8+8XN8caYLdTH1ulm/PL7ssI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.98.0 h1:SyxXimTyXrV37IIt/wtvxq0M3FJHHVMpzsr/g4juBcc=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.98.0/go.mod h1:KCdrQXuc8nMXw/IBVePQHPXE0Fn/JplJmLtC0o+1I0k=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.98.0 h1:7EYZ/OGcYkETf1TwiQdrNtJjscwPrJhbh4LAx1TJJj8=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.98.0/go.mod h1:szjKDD6fmUcMcAwr/6JRV537TxKZtoaW7GaMS3ut2ns=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver v0.98.0 h1:+l3egIFnvgsdbjhJbyBrFPum5pGYz5PQUP+5ctJumos=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver v0.98.0/go.mod h1:Rk7IXjlZ0yRoucw73hniIp3PrfXIBcfdexhEo9UpaS4=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.98.0 h1:hiudTWIOCQ6boREJ10ijrsaSwB6ywZ+Zf9JpDWBLsfM=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.98.0/go.mod h1:C+3Bs5efAss1Wv0y1Bfq5YWGf8xmoF0d2TCDx/Kr5o8=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.98.0 h1:lR7HN8eOYJfhhFJoyOi4F6r19KcZ45U88sDYfG6Wzv4=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.98.0/go.mod h1:HQ0Z6XkFow6IK0E27bM8hi0a5EaXa23N3alFe1IWraE=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.98.0 h1:ct+a4NWbBgM00d1uxJ+8b3adX+2U6sTuAwpFspMuIp4=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.98.0/go.mod h1:mHVzbFtTDv4AaQ18vWP7rJ4OJjPfoi3TQSrDmOyuhDo=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.98.0 h1:4B13z8toU1BKf86cNmqvfYzs3ipXB73zrwaC/Tg/6t4=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.98.0/go.mod h1:DoJc8YvhFDiQ2vFP5SyN5Rtwxvht0Lb3cLc6D+ugjvI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.98.0 h1:zlAAkEfq8HMwVPRIMWeoMIdwwO2ULDoTQCj/NxfJ4MI=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.98.0/go.mod h1:JLA1nMGAcSlqAdtK9KuVg092WnrXE2ULkCr1j/1po6g=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.98.0 h1:YRljpLhmw/uZa8Ox8ZAF7E6tttHE3xIDjd8E0ORv/FU=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.98.0/go.mod h1:Co1AmFP7AHWnmNbk8mmJNQupWNICo+9IWJHNQf0F1aM=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.98.0 h1:2VAQfI6o8zunjLzkOZ7Bg9N+annovyIcRjz5Eja8ra0=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.98.0/go.mod h1:ci1rco0CWHtTwJZIk8wlyX3tHoUOEUuqwPHZkNPBaUQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.102.0 h1:IhXkhX9xl3zkWe+JKuzlgmSsN0esLGLHai+tjKuzrMs=
+github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider v0.102.0/go.mod h1:zzrJwGGJmMcGnyjMcmiU+5gHRZfifkW5KwxNsswrZXA=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.102.0 h1:xXCtUKbUJIG1mO3K/FpicATBWQdSxti+BunL8DoAAYg=
+github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.102.0/go.mod h1:lq5ITwNt3wDin/NjdiPlZckoAu8jtkYsCPBVGdBiXuM=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.102.0 h1:uWN/MbNH5JMPJBmq9FyjJWNnAFqhUlz1rvHPJxa4p0I=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.102.0/go.mod h1:mUL5oMDIMZ/ag5fVrC01saUysW34EI3MgdolDteu0wY=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.102.0 h1:cu0kqeY5cveXnrmmYrbm4eJ7TJzuGcl0g8VfeN6BAVw=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.102.0/go.mod h1:wppFe72+YawEA4LkIGAA8Db5U4ZU5Ka2LPNLbSjL/zs=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.102.0 h1:G8GnJzeBbu5MeRcKVquVU1M8hbNgayIMmvsGzZtznik=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.102.0/go.mod h1:w2R2MFYqf6fQsz9OzddYbs/OJ1PuGZxLZMuISBu8lKk=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.102.0 h1:ibVjZzsJ+a0Ak5QhIYqAfjxpt4RKeN7QS/dFkQHwyV0=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.102.0/go.mod h1:qGz/lAZL7DCqVE+kqMvnrBaad4r9iIZwBjzkbaDmAok=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.102.0 h1:eFBPjzw0s4NvbgpYklIbHjf+dR0KhZtinulN9A9Y7uo=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.102.0/go.mod h1:7uYoWqNDjjPNUzR7X2vl/b2Z133nSuj+uNY+y8gMEQ0=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 h1:R70PpK14trQfL/Vj5oAiGRqX09s2gOWuf6t1Ae5fevQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0/go.mod h1:xmy/yFFmB1Epy+czrYMbA+4xeOKvhFqNqYWU6qINeis=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.102.0 h1:EZuqhQIPU3M+DNRHAhds9KSfyP2im4+dAwRcwf9w5fc=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.102.0/go.mod h1:DZNW1qo7BlkpRO5BDYRm7T9YzTdsHBiQusY61YkzVV4=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.102.0 h1:4OutdzVzy196//evF/UBTar8rSGNaJ3Yuh4Pwg/LAmU=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.102.0/go.mod h1:o57bf/rPEnCn9bNsSb3ATE9Nnp8aoUHlw3KgKfSkfiA=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.102.0 h1:7hcEkWSxneYopMZIAm76c9F7x1/EAgD7lFgKLT1df8U=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.102.0/go.mod h1:xqqyiXyWu+B67gi6Ql8D1KIelMWxFs+iXKXNKn+Pvms=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.102.0 h1:jyFZk2+JJNp74U+KnHVw1xyWQuTEGcJjyl918Nh1Qbc=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.102.0/go.mod h1:xR0NbceSTwHMGKmFTv7M3/9nnhHBkYH/hjcRIi67cUE=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.102.0 h1:P8Q8cE0VFM2IBaFSAe3UKIi7zoce3ASpFC1uxTSVG6g=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.102.0/go.mod h1:iaYat5ExHPLP7ppc0gj/I0pm0W4ZTJtSMWRsBXrLSUk=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.102.0 h1:6X5kC2lCReGG8WtSuNudDAalhXwO75zJ3SkRCy4VNgg=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.102.0/go.mod h1:RkefxcqQcjnPonXBOtFVWLlUGUan1svxVn2IclW9Ikk=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.102.0 h1:7YSL78HaJYjNpTvzBsVi/KLMIUWrLtEsZVxz6Jgxs9s=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.102.0/go.mod h1:A0x5FssgcbFd6sVtdUR388d+BjEO2KDtK8SS4eHFeJI=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding v0.102.0 h1:w485PSqcVIzWhpQ5nKrNT7hsCGyzp9cHi4ULEWCLBJw=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding v0.102.0/go.mod h1:icDqNrFRN2j6JOf0GC4suoOv+pesn8BPgP6TAhzX40o=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding/otlpencodingextension v0.102.0 h1:oROOy1qusmMrl1CzV51TvUZTclFGnU1y+vRse/PnT0U=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding/otlpencodingextension v0.102.0/go.mod h1:83IL66l2OJD+fDIHTvFewRNME/ZG4CHumw5F13ZYUEk=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.102.0 h1:5Z2JnKjMC885NAPUXmb04Rl4GWMLqGhfhnwLXLDFoi0=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.102.0/go.mod h1:BxTVeS5jyOTCRPkrhrS2K0maUL3WywYOg0xhR74taiQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.102.0 h1:raIYOJXzMdl18vevTOPSmVTHzy5peYZii2k/Q06+Vns=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.102.0/go.mod h1:Z4Cz4wOvgbqnPsPZl06AW2x3Mr5lJkEBYcF0ytl0UAI=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.102.0 h1:pynCEn05oq47ov80aIiratpdoG/0GytiZ5P3IVQXyeM=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.102.0/go.mod h1:O+IHBEtgSQWLhtuwFzgWo0ztaSBaMeMhOToIV1Ul+W8=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.102.0 h1:X9cOU9eRDcVSiptZl53Rs170Upt48DMulq9qlYl5Khk=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.102.0/go.mod h1:LUCTFaxau7b/JSsVEKcdyayUYf8lB1oA7e00B57hJ6M=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.102.0 h1:7QHxeMnKzMXMw9oh5lnOHakfPpGSglxiZfbYUn6l6yc=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.102.0/go.mod h1:BtKaHa1yDHfhM9qjGUHweb0HgqFGxFSM7AMzwLXVR98=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.102.0 h1:x4BjnaY7CAJS5JDmP+Zh148hqUDycbTb5c06MRSUx5c=
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.102.0/go.mod h1:r9909Vq0VMC1lO+73E3TpGVFilV5FZ7FeAoQSqShFxU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.102.0 h1:rEWcDImXvpM9fEFJ+GMfr7RzBrQ6QFAK+J2zSOsGYFY=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.102.0/go.mod h1:YwfX+TgF6zFTw0Wf3kmGIXSDp/AwyjoSOZrMwBxlsKk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.102.0 h1:KyRiFvgoeZ0eaG5E0qfVsz7hWioC99oVCbW5KEithRU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight v0.102.0/go.mod h1:eCaC2dI4+XNE/9vtOUuBf43S8TX+9aKF0q7teMkfBO0=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.102.0 h1:esk18lXw6/pF0kA8SNyXQ4sPyqTxB7CFVnlz+xWGYsw=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.102.0/go.mod h1:n5JjCb/abURaWp3bk4vfhFcYTS1SW+vfG5tqyrpfODs=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.102.0 h1:w+l3bz1a0KDNRz3plkDQN64aJlTBmhGzGFwqJRVFg4U=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.102.0/go.mod h1:7LoXgd02t4N/DR9gEO9EXpvUvPgCH07I3ceeQv83igk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.102.0 h1:fbKVyaNxbn45GDyZS2vvmBzbhh2+H2rV/dxDEy8E9yY=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/k8s v0.102.0/go.mod h1:S8JnwOXoUJHtI8go+NlLFjMmtUl7iOjfPF8wl1clu88=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.102.0 h1:FjfDzPRjHmW0dfgWGQ+JoC1xh2cVmTRaV4bFOeO90ko=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics v0.102.0/go.mod h1:8IP4A6OLAjEqKAl6zuwzFSUI+iH8eCkpkV2aRyd+guk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.102.0 h1:9h0/d5e6elcZE+YT69ev0stjZ5oE+SLDBjLexlQwL3c=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.102.0/go.mod h1:DMRqpQLvBdeUJ2jBCZ9fbGfabPY1q+7BnB47Z3wNpRM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.102.0 h1:F6zV1vFlaWRtMPweOAl65692SAOQO/ke0oGcZO+kKpo=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.102.0/go.mod h1:0FriB/aLHPn7/cUy4sUnULzvM+KlJThfi44To3+lzlg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 h1:PNLVcz8kJLE9V5kGnbBh277Bvl4WwiVZ+NbFbOB80WY=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0/go.mod h1:cBbjwd8m4rBVgCQksUbAVQX1EoM5IuCyNQw2mzvibEM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 h1:qsM5HhWpAfIMg8LdO4u+CHofu4UuCuJwg/M+ySO9uZA=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0/go.mod h1:wBJlGy9Wx6s7AxIMcSne2sGw73e5ZUy1AQ/duYwpFf8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.102.0 h1:GIsVqsOT4Awe/kwKrrkOlApWQjpNx0HN2JQDaSYe1G0=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.102.0/go.mod h1:fuk7yN1ywQn03CebMPfC2BiWYNJYwl3DZjWAW7wLskg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.102.0 h1:TN+wdhgwDn4zSr39fFOG0e7XJNCDwUSJb8HiBZ5orWk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.102.0/go.mod h1:RNe02aDLdqqEsJ+nemN+TDJf016wKf87eZYuAEfhZyU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.102.0 h1:CS9t6i//34KdqCw/kOmSydkmBtpOB7+1fLv1QN3kKyE=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.102.0/go.mod h1:VS66oUydCMwiWl1BFmLs7iNy4lGsfVYsriXr/d1fpAk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.102.0 h1:BzJfpn0nAGZotwEESOj1JDYUm1hj7zWE80b12ubfVdg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.102.0/go.mod h1:hCzgpzXbUUDUlETMDKYOpPaOhPjR9T6M3W3nAW5cGX4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 h1:xBd9EXG9qvWwa2d7qDRVv/D/2gAQqn1zGbPqdjkd+O8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0/go.mod h1:e4pc6nkNyzBi5g2RgIRjJ1slRsOY5qHIbPu0E4oM3cE=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.102.0 h1:Qdp0oECMX98thAm5IVq7OqYJ1zb09vAT/4DtX76bT9I=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.102.0/go.mod h1:+7Em/4vP3Ob8n2yWz6+rtO3mKHvCFzcp47JAyrPXK2o=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.102.0 h1:p0uPl321knuQoZbghyYJyOVGNMkU25X/jZSgElCwMFQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.102.0/go.mod h1:x6gFY41QGX7drh0mSwo2iyixb4zxMnu6LRSnsGyYsRc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 h1:/J1Q2tylp8ID+AIpCmfaArUyCPoSjY3nyZXdkpTw9J8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0/go.mod h1:lbNQBpvs40lInohZrqAbRZ+8r29GzfMfkbLV4fBPrzE=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.102.0 h1:svPhJAYMrLKhXOqGYQBz2OUtsYzFXLs1Qog+oT7pong=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.102.0/go.mod h1:mrL8X1FJnoFbJbPTSpguMjWddQlu96qXdScT4aXZvCE=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.102.0 h1:TBXs/pyiGWsuzi1hj7mxSAgD9MjCtUV9Hm5nzUa1SbA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.102.0/go.mod h1:Bt9i6mylVtnB9CjBkZPIosfM9TAmqg9AKcbZVOvOIJQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 h1:pVJ792+Nzcv8nLlg18XOLOWEZ/dCK+Wo3Iak5TU8rz8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0/go.mod h1:DmkGhNL9nuSTg8fMhYNopMuF1Y3LFqu/FQHrvhBzME0=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.102.0 h1:vTzSrsPwDAzESCxxJyZuSwavFY5C0hzT8GBVjSuv8C0=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.102.0/go.mod h1:rHr3jqZrwpzOPNA3Ic/AvMoK7OIiW7keysE1NZsaIE0=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.102.0 h1:sfw+sVvoUNYOtlv6Zkoikxq/F8MVVQU+JHDO+YzYzE4=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.102.0/go.mod h1:MFHy47V/DLbE9Sr/Y6bfqEqbI1GBKWUvbOQBr8XrKwU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.102.0 h1:EPmEtTgrlNzriEYZpkVOVDWlqWTUHoEqmM8oU/EpdkA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.102.0/go.mod h1:qnLc/+jOVcsL1dF17ztBcf3juQ3f9bt6Wuf+Xxbrd9w=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0 h1:vJL6lDaeI3pVA7ADnWKD3HMpI80BSrZ2UnGc+qkwqoY=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0/go.mod h1:xtE7tds5j8PtI/wMuGb+Em5K9rJH8hm6t28Qe4QrpoU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 h1:TvJYcU/DLRFCgHr7nT98k5D+qkZ4syKVxc8OJjv+K4c=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0/go.mod h1:WzD3Ox7tywAQHknxAFpAC1oZJGItMp5mbvgUGjvzNY8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.102.0 h1:iVdVcLq5uCvvG6bmOwdbRQbjWPsaQY/caDaIE4rJV80=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.102.0/go.mod h1:gSlq0MAX1balwTobJjaQtk/Znm3We2muLNaSLELHxUQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.102.0 h1:Nue1wHi8PobP90PXeB8vqoITOCZA/+Hs5Sy3fKfaTKo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.102.0/go.mod h1:lFq+13yxprvJCoYrrTyFNj7XyouWGaKY6+lklVNKP8o=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.102.0 h1:J8GFYxKLWG1360XRukc1tY5K9BF80MFXcO91UpCMgcQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.102.0/go.mod h1:GNxigQNap2jyOEPdOedAKqCbh61y576ND4BKn/7i8xY=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 h1:IgLMHSuraJzxLqVeM7xU7aZPcXS5/eoVnX+HBuFGQ6E=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0/go.mod h1:hG8EmxUvgXIiKTG6+UVcMhFeIN6UD/bswP7WYpQ2lCc=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 h1:4VQidhCgkJiBvBDMOukr5ixrf5uP66iW5Hb+CFsb+4E=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0/go.mod h1:nMto9zkv0vD8YI3oGZFZS2Uu7k2oHt1d+xUHN/ofUYo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.102.0 h1:mADBTNQknqXT6AiO3S8MvG2LwAum2K0fMZuWt5UyMNw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.102.0/go.mod h1:Oh95IxRZCHq+CUwADrUCkON0/nSsnKLGqT7qW1ZQx/Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.102.0 h1:rHLWscq00lDfTW0BYuX2EEH9w1+t7X7ER9PcFjEowso=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.102.0/go.mod h1:+Vlutd4t2XluxHYbIAfZiz3z5uWbsbiIUpipV5AnLtk=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.102.0 h1:XpfktvTdnq+UAsn/RzdVX57rEcJiwcU/HQZDCZJdl8A=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.102.0/go.mod h1:XC8GlAwdRuxQR1yU9fMKKyOMFNKSTDZykuAGl1d7Y+8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 h1:5M7I78lyGsH+Xyy4NoXKM/UUCa52aZQiPcSX6so6x94=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0/go.mod h1:BEQy0zEel5uIOTEFBBmvQJ4A32R6nKLtSMtC6ylLI8k=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.102.0 h1:7CHzBkwrwfKBAYid7ii7CKO7kxSVVruMJKEnXFfO8ig=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.102.0/go.mod h1:OSi85ea3BWIrFqrB6q1QN1F5sCfTzJS6ECGD2Bk30JQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.102.0 h1:q4VV17TxeMm0FOeyFXAO4gSRf2ZLtKTh0/l5goxhRsY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.102.0/go.mod h1:FlP/8TVT768TAh5kpvVX3AQ5/UXJWBuSSCFhO3fE+E0=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.102.0 h1:mj3t9/FAQZjcZJA2kjgbpz2fSK9yD/pYpmqKEWpHJ1A=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.102.0/go.mod h1:IIIjEblgrNISbDY7GPMMto9kEVIf0n9IeJoVru89kfY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.102.0 h1:DaEYlVCn58GtkyYVK0IT/ZMjRFJ+BfmR0p9I0Eq42aQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.102.0/go.mod h1:u9x08rUCWdgI8Nle5XOMTCmxd0K26KTZvMMA5H8Xjyg=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.102.0 h1:+FmLD8ra+NQmVzscM1usSGIrgQ2R543TlAZxl4CPBjc=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.102.0/go.mod h1:otpZH7pH5hnhobMAiTRJyY8bAdWGecCaxkMUCW/3jpA=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.102.0 h1:mkRDKVWXfG1gTxwg69ttJoGmXOKNHAGsGms06DrwTlQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.102.0/go.mod h1:5F6hpHujLkLuEYmbbUXel2i3mBpwRJHmy8KTY3cbOVg=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.102.0 h1:AU4vK5OAGHgENTGcHwN0HDJFQQBohQhELmHjHen0exg=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.102.0/go.mod h1:pVWvccpyxdMB+wdC3wYzyEINCof2uAb0pirCVupVSNE=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.102.0 h1:nAJSknk1/WP0/d3SJWxqCdt/5S8N21CUPBlgiLs/Woc=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.102.0/go.mod h1:NZV5kpz6nqwG0pbcoBNWlc4TcN22KYLg/knoy3LuDI8=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.102.0 h1:ErBYnmZUSyPQjHPlyAeUOtQDax0tH2Ax/zOuklZp5Y8=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.102.0/go.mod h1:hHBt7x5lWzwwjdro0JQ/JLa64iFHCUzQwpMuiPptrSw=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.102.0 h1:6A/fYQFf6DaY1C+64eRnuua1HrCv8N83HaVrYTMtjnU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.102.0/go.mod h1:AdK1CInrB6QOgBQCedOlSuBBq/hocWOMOkoKXFZPsy8=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.102.0 h1:9DEErMWgwGZFAINzn+ujIMkH1JtPcuPeS9RtWcMtc9g=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.102.0/go.mod h1:oau2EF+n4ZbtZ9V1YkK50CIjFB10bW0PN1XSsTnkn+U=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.102.0 h1:qfB41MXQbfK/Z/Jz9K0IFDLWkyLpXpCUlfdMFW93aS0=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.102.0/go.mod h1:LRW+jAuVDFUpV3maQWrXrQLEnkgK4rR7FsIb29RVfFg=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.102.0 h1:haaQludKUMYLQUjoPKVWzeP7qcHOF045FLd+WtihQDk=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.102.0/go.mod h1:8hPQU8tprx79lDkMq4aqxU3WEurKYGVe9fM2p1VYN9I=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.102.0 h1:0TQZTCWFmOQ4OAEIvIV1Ds74X1d5kQYalYJFivsuqzo=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.102.0/go.mod h1:2T6Wk8q8IoUGtbigSs1/IHCUEt7Q7t+tNRtcKlZSw5M=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.102.0 h1:6rv1z6GzSzbfOvukVp5hmDJ7+6dt2D29cIWBc8mLAjA=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.102.0/go.mod h1:iIF2e+vr5+31h8+CGIRmmp/mWxdFgf1ekgjzU9l+9SI=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.102.0 h1:tsqbx07W2X2Q6A0RIbGcFAudQmUiSPl31NV3fLQg798=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.102.0/go.mod h1:mEz6dKlUDrGXo2Fmu20X0b9VlExq9ngmhwFiPtnNcV0=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.102.0 h1:id/dVSgLHAKOypoL93pcOoB5H0/uLpJ8v5/DR6ShL0M=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.102.0/go.mod h1:z1hVuJZuYk0VjFyHlOuWiDiMsLMhAxBgvS/wN+E+Ki0=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver v0.102.0 h1:G1ehwzryuA4oLWXjHnrvtMSB8BtRSE22/Qc+vu2YwYk=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver v0.102.0/go.mod h1:WbkG6SPeesheMtvjXwRg6PIk2jIKK20kEEPMr36MYrE=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.102.0 h1:czJBjI4rZ+FNrdq/MkLQP4f6tsB3XIwN3mVXZOiIYcM=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.102.0/go.mod h1:eRViM57aYPXdI8bH1gMcpc02gIP4+QW5bXPjZiJLwgU=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.102.0 h1:2JxSNc2Tw+JTsNqEuUM4fKJLBSSxhbQYRgLoasvkTH0=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.102.0/go.mod h1:JjaTMPGUbc2OhwocO/xj5HLXO99tdmNYnsXOqie01kg=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0 h1:HTGSfx2HzfudY1Uczw9yTBJnGBmTVFYzpGH1z+oD0nU=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0/go.mod h1:Hlz24+Ah6Ojk0FUKNb1watRmTbLEru35+feroKA7dvQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0 h1:2D3niNAKkr+NRVmAJW0bquSjzHUL6Pf1qQRLRPwA13M=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0/go.mod h1:h0uqwH7b+NGDfFFWTjoGErMdYRdCqP1Az1/G+tfG024=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.102.0 h1:RB12IhSeXEt1y6nGtVvlqWtuzR3q/u33WFkjnot5mVg=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.102.0/go.mod h1:F6Nl+wlY/83a4wu2T6X6L4m0+ZBkzNqWjmyzYlHIQ3Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.102.0 h1:VZS44fpY1A2nie/ocyYjyqnr/qD6iuxYR9BG+c0W0Gk=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.102.0/go.mod h1:KD3hYX7Zeh6ZHthLgBlUt//hlJsaLV8hH6fCvZgTW8c=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0 h1:Pemo9pZa3VMYdrM/bss3f0qqVyBzPSulOBQL8VQcgN8=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0/go.mod h1:fvjAM+jOQdiXCmAENKH/eWxBBqTaImbq3lpoBI4X5Ek=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -941,8 +1085,9 @@ github.com/openshift/api v0.0.0-20210521075222-e273a339932a/go.mod h1:izBmoXbUu3
github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 h1:ZHRIMCFIJN1p9LsJt4HQ+akDrys4PrYnXzOWI5LK03I=
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142/go.mod h1:fjS8r9mqDVsPb5td3NehsNOAWa4uiFkYEfVZioQ2gH0=
-github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA=
-github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
+github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64=
github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
@@ -952,6 +1097,7 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
@@ -977,46 +1123,53 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI=
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.52.3 h1:5f8uj6ZwHSscOGNdIQg6OiZv/ybiK2CO2q2drVZAQSA=
-github.com/prometheus/common v0.52.3/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
+github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
+github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
-github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
+github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek=
+github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk=
github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e h1:UmqAuY2OyDoog8+l5FybViJE5B2r+UxVGCUwFTsY5AA=
github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e/go.mod h1:+0ld+ozir7zWFcHA2vVpWAKxXakIioEjPPNOqH+J3ZA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/relvacode/iso8601 v1.4.0 h1:GsInVSEJfkYuirYFxa80nMLbH2aydgZpIf52gYZXUJs=
github.com/relvacode/iso8601 v1.4.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo=
github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -1036,8 +1189,8 @@ github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod
github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
github.com/shirou/gopsutil/v3 v3.22.12/go.mod h1:Xd7P1kwZcp5VW52+9XsirIKd/BROzbb2wdX3Kqlz9uI=
-github.com/shirou/gopsutil/v3 v3.24.3 h1:eoUGJSmdfLzJ3mxIhmOAhgKEKgQkeOwKpz1NbhVnuPE=
-github.com/shirou/gopsutil/v3 v3.24.3/go.mod h1:JpND7O217xa72ewWz9zN2eIIkPWsDN/3pl0H8Qt0uwg=
+github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU=
+github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
@@ -1054,20 +1207,32 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/stormcat24/protodep v0.1.8 h1:FOycjjkjZiastf21aRoCjtoVdhsoBE8mZ0RvY6AHqFE=
+github.com/stormcat24/protodep v0.1.8/go.mod h1:6OoSZD5GGomKfmH1LvfJxNIRvYhewFXH5+eNv8h4wOM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -1089,6 +1254,7 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -1112,7 +1278,12 @@ github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0h
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA=
@@ -1132,6 +1303,8 @@ github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1143,8 +1316,9 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zorkian/go-datadog-api v2.30.0+incompatible h1:R4ryGocppDqZZbnNc5EDR8xGWF/z/MxzWnqTUijDQes=
github.com/zorkian/go-datadog-api v2.30.0+incompatible/go.mod h1:PkXwHX9CUQa/FpB9ZwAD45N1uhCW4MT/Wj7m36PbKss=
-go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
-go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
+go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
@@ -1155,135 +1329,148 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector v0.98.0 h1:O7bpARGWzNfFQEYevLl4iigDrpGTJY3vV/kKqNZzMOk=
-go.opentelemetry.io/collector v0.98.0/go.mod h1:fvPM+tBML07uvAP1MV2msYPSYJ9U/lgE1jDb3AFBaMM=
-go.opentelemetry.io/collector/component v0.98.0 h1:0TMaBOyCdABiVLFdGOgG8zd/1IeGldCinYonbY08xWk=
-go.opentelemetry.io/collector/component v0.98.0/go.mod h1:F6zyQLsoExl6r2q6WWZm8rmSSALbwG2zwIHLrMzZVio=
-go.opentelemetry.io/collector/config/configauth v0.98.0 h1:FPffZ1dRL6emStrDUEGpL0rCChbUZNAQgpArXD0SESI=
-go.opentelemetry.io/collector/config/configauth v0.98.0/go.mod h1:5pMzf2zgFwS7tujNq0AtOOli5vxIvnrNi7JlZwrBOFo=
-go.opentelemetry.io/collector/config/configcompression v1.5.0 h1:FTxKbFPN4LznRCH/GQ+b+0tAWmg80Y2eEka79S2sLZ0=
-go.opentelemetry.io/collector/config/configcompression v1.5.0/go.mod h1:O0fOPCADyGwGLLIf5lf7N3960NsnIfxsm6dr/mIpL+M=
-go.opentelemetry.io/collector/config/configgrpc v0.98.0 h1:4yP/TphwQnbgLpJ72NymXaERVjLjuDAQp4iDKCTcv5g=
-go.opentelemetry.io/collector/config/configgrpc v0.98.0/go.mod h1:tIng0xx1XlVr4I0YG5bNpts0hZDjwzN3Jkz6cKaSH/s=
-go.opentelemetry.io/collector/config/confighttp v0.98.0 h1:pW7gR34TTXcrCHJgemL6A4VBVBS2NyDAkruSMvQj1Vo=
-go.opentelemetry.io/collector/config/confighttp v0.98.0/go.mod h1:M9PMtiKrTJMG8i3SqJ+AUVKhR6sa3G/8S2F1+Dxkkr0=
-go.opentelemetry.io/collector/config/confignet v0.98.0 h1:pXDBb2hFe10T/NMHlL/oMgk1aFfe4NmmJFdFoioyC9o=
-go.opentelemetry.io/collector/config/confignet v0.98.0/go.mod h1:3naWoPss70RhDHhYjGACi7xh4NcVRvs9itzIRVWyu1k=
-go.opentelemetry.io/collector/config/configopaque v1.5.0 h1:WJzgmsFU2v63BypPBNGL31ACwWn6PwumPJNpLZplcdE=
-go.opentelemetry.io/collector/config/configopaque v1.5.0/go.mod h1:/otnfj2E8r5EfaAdNV4qHkTclmiBCZXaahV5EcLwT7k=
-go.opentelemetry.io/collector/config/configretry v0.98.0 h1:gZRenX9oMLJmQ/CD8YwFNl9YYl68RtcD0RYSCJhrMAk=
-go.opentelemetry.io/collector/config/configretry v0.98.0/go.mod h1:uRdmPeCkrW9Zsadh2WEbQ1AGXGYJ02vCfmmT+0g69nY=
-go.opentelemetry.io/collector/config/configtelemetry v0.98.0 h1:f8RNZ1l/kYPPoxFmKKvTUli8iON7CMsm85KM38PVNts=
-go.opentelemetry.io/collector/config/configtelemetry v0.98.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o=
-go.opentelemetry.io/collector/config/configtls v0.98.0 h1:g+MADy01ge8iGC6v2tbJ5G27CWNG1BaJtmYdmpvm8e4=
-go.opentelemetry.io/collector/config/configtls v0.98.0/go.mod h1:9RHArziz0mNEEkti0kz5LIdvbQGT7/Unu/0whKKazHQ=
-go.opentelemetry.io/collector/config/internal v0.98.0 h1:wz/6ncawMX5cfIiXJEYSUm1g1U6iE/VxFRm4/WhVBPI=
-go.opentelemetry.io/collector/config/internal v0.98.0/go.mod h1:xPnEE6QaTSXr+ctYMSTBxI2qwTntTUM4cYk7OTm6Ugc=
-go.opentelemetry.io/collector/confmap v0.98.0 h1:qQreBlrqio1y7uhrAvr+W86YbQ6fw7StgkbYpvJ2vVc=
-go.opentelemetry.io/collector/confmap v0.98.0/go.mod h1:BWKPIpYeUzSG6ZgCJMjF7xsLvyrvJCfYURl57E5vhiQ=
-go.opentelemetry.io/collector/confmap/converter/expandconverter v0.98.0 h1:lRhfcLr3gK5S/zn92h3clyOPnCvvNKs1WTMbtH4UvO0=
-go.opentelemetry.io/collector/confmap/converter/expandconverter v0.98.0/go.mod h1:vNMFTWe4dF05LsodUOc84OfxdlYVp1kCMuZzb41WfAk=
-go.opentelemetry.io/collector/confmap/provider/envprovider v0.98.0 h1:x/VsGlBj+DtJCXIucwzwcxiwnwAU8a6ALK6UN8fPdKQ=
-go.opentelemetry.io/collector/confmap/provider/envprovider v0.98.0/go.mod h1:BapTGXu7CYrQGNohbapPwTSt2Ty/k/c6Oemx9mSSiK4=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v0.98.0 h1:SxDS+Yr8qE+ID58ELR5n0D+SUlqHKOZ72pK3YPFAelA=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v0.98.0/go.mod h1:DEoB0d0k1iGt4KEABntL8AW9xYQ6E7fmgkM2/s8aXvM=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v0.98.0 h1:C02SPbRPvrtmZ9TvsHWpz2TvHzqY5mNyEAlDdhax/a4=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v0.98.0/go.mod h1:dzZKtykJio3Rm+G+Cmr15VV3xKp0PmFuh9Q9b3c1K7A=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0 h1:04zGXVQZ8D6nvoPX8AaqxWxGHNNVsGR78E+tY+2VQr8=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.98.0/go.mod h1:+UrRiugWaQPssz4mgEgQQo640f2bDUCFlo2Xr0/5ulc=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0 h1:JYpDN0OnMsu0awk0rjaYEIko9hFzzBJ6+2U5W2iVvUE=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.98.0/go.mod h1:xrXL804nBum1PgbvmJQ4I+hyW+DU4xBGO3MKMiYFX6E=
-go.opentelemetry.io/collector/connector v0.98.0 h1:1ifadXqOtB5bZ+OocLVlzF0zltWjP70E3+xYt2fJnMg=
-go.opentelemetry.io/collector/connector v0.98.0/go.mod h1:OFii9qa2ZgktI61/r0gWDsGjXtpEe+qXC8+0o4ZySeA=
-go.opentelemetry.io/collector/consumer v0.98.0 h1:47zJ5HFKXVA0RciuwkZnPU5W8j0TYUxToB1/zzzgEhs=
-go.opentelemetry.io/collector/consumer v0.98.0/go.mod h1:c2edTq38uVJET/NE6VV7/Qpyznnlz8b6VE7J6TXD57c=
-go.opentelemetry.io/collector/exporter v0.98.0 h1:eN2qtkiwpeX9gBu9JZw1k/CZ3N9wZE1aGJ1A0EvwJ7w=
-go.opentelemetry.io/collector/exporter v0.98.0/go.mod h1:GCW46a0VAuW7nljlW//GgFXI+8mSrJjrdEKVO9icExE=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.98.0 h1:2DNfziYl0w8Sq9bPdYlPpn5MLLQGB73LB7O1BIYQxA4=
-go.opentelemetry.io/collector/exporter/loggingexporter v0.98.0/go.mod h1:SBuTQ0sA3fEd/jAJFAxjTX8Ndwkc4Mtkc6gsz115S+8=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.98.0 h1:uhiR/luaJCwMnvvkIS/gIxBbSAp+/vbqeC3AXmuc/kg=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.98.0/go.mod h1:1ySnK/6Cl+67FTP6ty04PX9nrXPYFPuBqZ+Xn9Jzz6Y=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.98.0 h1:+6mRqTgoJxXxuPwI8s5fMKm0mLfwVwJgD2EB7gUNNlE=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.98.0/go.mod h1:uGocxqpbUrZDwZz6JBKsvNCyDLrS/pnVpn4BUuPauFw=
-go.opentelemetry.io/collector/extension v0.98.0 h1:08B5ipEsoNmPHY96j5EUsUrFre01GOZ4zgttUDtPUkY=
-go.opentelemetry.io/collector/extension v0.98.0/go.mod h1:fZ1Hnnahszl5j3xcW2sMRJ0FLWDOFkFMQeVDP0Se7i8=
-go.opentelemetry.io/collector/extension/auth v0.98.0 h1:7b1jioijJbTMqaOCrz5Hoqf+zJn2iPlGmtN7pXLNWbA=
-go.opentelemetry.io/collector/extension/auth v0.98.0/go.mod h1:gssWC4AxAwAEKI2CqS93lhjWffsVdzD8q7UGL6LaRr0=
-go.opentelemetry.io/collector/extension/ballastextension v0.98.0 h1:EPzsYpiSY4vAfzJMqhVK6bIh+qZRmXVskaNlRFKjA0w=
-go.opentelemetry.io/collector/extension/ballastextension v0.98.0/go.mod h1:IY/JNP0g+tUUe/w5YHgBYwv5XlH4eqo5d4th+RGROFU=
-go.opentelemetry.io/collector/extension/zpagesextension v0.98.0 h1:JfvsDpTwAhA9au8/4vmONRh0OBVU6n36seb41JD/mTQ=
-go.opentelemetry.io/collector/extension/zpagesextension v0.98.0/go.mod h1:t1zDwy6kYp4w1JgcGHMvdGbKYHqWpK00bB1AEQ0Oqlc=
-go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
-go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
-go.opentelemetry.io/collector/otelcol v0.98.0 h1:sUk49Wqw+VBYeDynEA+GSeVEusFvzFW3KuF2vfDbyo0=
-go.opentelemetry.io/collector/otelcol v0.98.0/go.mod h1:dW3UzuaiaNTddjKajk3Tp2Y7muDvYJdQz2yGUOE53gs=
-go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
-go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
-go.opentelemetry.io/collector/pdata/testdata v0.98.0 h1:8gohV+LFXqMzuDwfOOQy9GcZBOX0C9xGoQkoeXFTzmI=
-go.opentelemetry.io/collector/pdata/testdata v0.98.0/go.mod h1:B/IaHcf6+RtxI292CZu9TjfYQdi1n4+v6b8rHEonpKs=
-go.opentelemetry.io/collector/processor v0.98.0 h1:onrg8a99lToytbHF148Bg9a7DfNk31B+p6UHouiiVTw=
-go.opentelemetry.io/collector/processor v0.98.0/go.mod h1:QxgzjmJI12DQWN0LIHmZBOR7HRzPuVWFW4oqTdrS1ho=
-go.opentelemetry.io/collector/processor/batchprocessor v0.98.0 h1:iM4fMLGig3GKmz5XNtOPKDsnCnvbi0+UHYaWsx/aSRc=
-go.opentelemetry.io/collector/processor/batchprocessor v0.98.0/go.mod h1:ROnuUkZJgpKEIDf3AIVjgRGNI7KPqCKPXsw8whL6Hzs=
-go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.98.0 h1:1jjiC3POfIluGLVM+6y8nolKEI95/vlHAvDmIOatags=
-go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.98.0/go.mod h1:VQDDrhQbIoelGF+fKzy6vCQM3hWDCH2YFaZKqgTDmGk=
-go.opentelemetry.io/collector/receiver v0.98.0 h1:qw6JYwm+sHcZvM1DByo3QlGe6yGHuwd0yW4hEPVqYKU=
-go.opentelemetry.io/collector/receiver v0.98.0/go.mod h1:AwIWn+KnquTR+kbhXQrMH+i2PvTCFldSIJznBWFYs0s=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0 h1:j7lfLwc5o1dtXIPXU8LjmxadejmJVRHN57ZYGH33Wq4=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.98.0/go.mod h1:uWDBDxaWuzF1U5S2UIhstO0+Q8aUiwiUu8uO1IYN2XQ=
-go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
-go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
-go.opentelemetry.io/collector/service v0.98.0 h1:lLJ7VXPXcV62fSISh4GuNd5ti6WvKje76NSgezc3ydo=
-go.opentelemetry.io/collector/service v0.98.0/go.mod h1:wB7ozvZTHtMefb5KTYy5nyrVYWpGk8teq8jWFs4blIU=
-go.opentelemetry.io/contrib/config v0.4.0 h1:Xb+ncYOqseLroMuBesGNRgVQolXcXOhMj7EhGwJCdHs=
-go.opentelemetry.io/contrib/config v0.4.0/go.mod h1:drNk2xRqLWW4/amk6Uh1S+sDAJTc7bcEEN1GfJzj418=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/contrib/propagators/b3 v1.25.0 h1:QU8UEKyPqgr/8vCC9LlDmkPnfFmiWAUF9GtJdcLz+BU=
-go.opentelemetry.io/contrib/propagators/b3 v1.25.0/go.mod h1:qonC7wyvtX1E6cEpAR+bJmhcGr6IVRGc/f6ZTpvi7jA=
-go.opentelemetry.io/contrib/zpages v0.50.0 h1:hKC5asr83xDN4ErwSHVdk3gv053pZiF8SZKmS86IPEw=
-go.opentelemetry.io/contrib/zpages v0.50.0/go.mod h1:8WovRn95fZdaX/dr3e4h7D8IqiVsnZ+WxY0Yn4LyU3k=
-go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
-go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
-go.opentelemetry.io/otel/bridge/opencensus v1.25.0 h1:0o/9KwAgxjK+3pMV0pwIF5toYHqDsPmQhfrBvKaG6mU=
-go.opentelemetry.io/otel/bridge/opencensus v1.25.0/go.mod h1:rZyTdpmRqoV+PpUn6QlruxJp/kE4765rPy0pP6mRDk8=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.25.0 h1:hDKnobznDpcdTlNzO0S/owRB8tyVr1OoeZZhDoqY+Cs=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.25.0/go.mod h1:kUDQaUs1h8iTIHbQTk+iJRiUvSfJYMMKTtMCaiVu7B0=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0 h1:Wc4hZuYXhVqq+TfRXLXlmNIL/awOanGx8ssq3ciDQxc=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.25.0/go.mod h1:BydOvapRqVEc0DVz27qWBX2jq45Ca5TI9mhZBDIdweY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 h1:dT33yIHtmsqpixFsSQPwNeY5drM9wTcoL8h0FWF4oGM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0 h1:vOL89uRfOCCNIjkisd0r7SEdJF3ZJFyCNY34fdZs8eU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0/go.mod h1:8GlBGcDk8KKi7n+2S4BT/CPZQYH3erLu0/k64r1MYgo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 h1:Mbi5PKN7u322woPa85d7ebZ+SOvEoPvoiBu+ryHWgfA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0/go.mod h1:e7ciERRhZaOZXVjx5MiL8TK5+Xv7G5Gv5PA2ZDEJdL8=
-go.opentelemetry.io/otel/exporters/prometheus v0.47.0 h1:OL6yk1Z/pEGdDnrBbxSsH+t4FY1zXfBRGd7bjwhlMLU=
-go.opentelemetry.io/otel/exporters/prometheus v0.47.0/go.mod h1:xF3N4OSICZDVbbYZydz9MHFro1RjmkPUKEvar2utG+Q=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.25.0 h1:d7nHbdzU84STOiszaOxQ3kw5IwkSmHsU5Muol5/vL4I=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.25.0/go.mod h1:yiPA1iZbb/EHYnODXOxvtKuB0I2hV8ehfLTEWpl7BJU=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.25.0 h1:0vZZdECYzhTt9MKQZ5qQ0V+J3MFu4MQaQ3COfugF+FQ=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.25.0/go.mod h1:e7iXx3HjaSSBXfy9ykVUlupS2Vp7LBIBuT21ousM2Hk=
-go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
-go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
-go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo=
-go.opentelemetry.io/otel/sdk v1.25.0/go.mod h1:oFgzCM2zdsxKzz6zwpTZYLLQsFwc+K0daArPdIhuxkw=
-go.opentelemetry.io/otel/sdk/metric v1.25.0 h1:7CiHOy08LbrxMAp4vWpbiPcklunUshVpAvGBrdDRlGw=
-go.opentelemetry.io/otel/sdk/metric v1.25.0/go.mod h1:LzwoKptdbBBdYfvtGCzGwk6GWMA3aUzBOwtQpR6Nz7o=
-go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
-go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
-go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
-go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
+go.opentelemetry.io/collector v0.102.1 h1:M/ciCcReQsSDYG9bJ2Qwqk7pQILDJ2bM/l0MdeCAvJE=
+go.opentelemetry.io/collector v0.102.1/go.mod h1:yF1lDRgL/Eksb4/LUnkMjvLvHHpi6wqBVlzp+dACnPM=
+go.opentelemetry.io/collector/component v0.102.1 h1:66z+LN5dVCXhvuVKD1b56/3cYLK+mtYSLIwlskYA9IQ=
+go.opentelemetry.io/collector/component v0.102.1/go.mod h1:XfkiSeImKYaewT2DavA80l0VZ3JjvGndZ8ayPXfp8d0=
+go.opentelemetry.io/collector/config/configauth v0.102.1 h1:LuzijaZulMu4xmAUG8WA00ZKDlampH+ERjxclb40Q9g=
+go.opentelemetry.io/collector/config/configauth v0.102.1/go.mod h1:kTzfI5fnbMJpm2wycVtQeWxFAtb7ns4HksSb66NIhX8=
+go.opentelemetry.io/collector/config/configcompression v1.9.0 h1:B2q6XMO6xiF2s+14XjqAQHGY5UefR+PtkZ0WAlmSqpU=
+go.opentelemetry.io/collector/config/configcompression v1.9.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0=
+go.opentelemetry.io/collector/config/configgrpc v0.102.1 h1:6Plnfx+xw/JH8k11MkljGoysPfn1u7hHbO2evteOTeE=
+go.opentelemetry.io/collector/config/configgrpc v0.102.1/go.mod h1:Kk3XOSar3QTzGDS8N8M38DVlOzUD7STS2obczO9q43I=
+go.opentelemetry.io/collector/config/confighttp v0.102.1 h1:tPw1Xf2PfDdrXoBKLY5Sd4Dh8FNm5i+6DKuky9XraIM=
+go.opentelemetry.io/collector/config/confighttp v0.102.1/go.mod h1:k4qscfjxuaDQmcAzioxmPujui9VSgW6oal3WLxp9CzI=
+go.opentelemetry.io/collector/config/confignet v0.102.1 h1:nSiAFQMzNCO4sDBztUxY73qFw4Vh0hVePq8+3wXUHtU=
+go.opentelemetry.io/collector/config/confignet v0.102.1/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E=
+go.opentelemetry.io/collector/config/configopaque v1.9.0 h1:jocenLdK/rVG9UoGlnpiBxXLXgH5NhIXCrVSTyKVYuA=
+go.opentelemetry.io/collector/config/configopaque v1.9.0/go.mod h1:8v1yaH4iYjcigbbyEaP/tzVXeFm4AaAsKBF9SBeqaG4=
+go.opentelemetry.io/collector/config/configretry v0.102.1 h1:J5/tXBL8P7d7HT5dxsp2H+//SkwDXR66Z9UTgRgtAzk=
+go.opentelemetry.io/collector/config/configretry v0.102.1/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4=
+go.opentelemetry.io/collector/config/configtelemetry v0.102.1 h1:f/CYcrOkaHd+COIJ2lWnEgBCHfhEycpbow4ZhrGwAlA=
+go.opentelemetry.io/collector/config/configtelemetry v0.102.1/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40=
+go.opentelemetry.io/collector/config/configtls v0.102.1 h1:7fr+PU9BRg0HRc1Pn3WmDW/4WBHRjuo7o1CdG2vQKoA=
+go.opentelemetry.io/collector/config/configtls v0.102.1/go.mod h1:KHdrvo3cwosgDxclyiLWmtbovIwqvaIGeTXr3p5721A=
+go.opentelemetry.io/collector/config/internal v0.102.1 h1:HFsFD3xpHUuNHb8/UTz5crJw1cMHzsJQf/86sgD44hw=
+go.opentelemetry.io/collector/config/internal v0.102.1/go.mod h1:Vig3dfeJJnuRe1kBNpszBzPoj5eYnR51wXbeq36Zfpg=
+go.opentelemetry.io/collector/confmap v0.102.1 h1:wZuH+d/P11Suz8wbp+xQCJ0BPE9m5pybtUe74c+rU7E=
+go.opentelemetry.io/collector/confmap v0.102.1/go.mod h1:KgpS7UxH5rkd69CzAzlY2I1heH8Z7eNCZlHmwQBMxNg=
+go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1 h1:s0RxnaABoRxtfvUeimZ0OOsF83wD/EK1tR2N5GZyst0=
+go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1/go.mod h1:ZwSMlOSIzmrrSSVNoMPDr21SQx7E52bZFMQJSOZ+EhY=
+go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1 h1:4KLw0pTChIqDfw0ckZ411aQDw98pu2dDOqgBHXfJm8M=
+go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1/go.mod h1:f+IJBW0Sc96T79qj3GQtE1wQ0uWEwpslD785efKBl+c=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1 h1:nPhOtUbJHfTDqZqtvU76HmEz9iV4O/4/DSCZdnm0mpY=
+go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1/go.mod h1:eJnr6YDQiocmoRBvsKj33bIc4wysq5hy/jmOApv1dSM=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1 h1:VsaGXqEUFost0mf2svhds6loYzPavkyY37nMQcqoTkc=
+go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1/go.mod h1:lQocxKI32Zj1F3PR9UZfzykq50/mOI1mbyZ0729dphI=
+go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1 h1:rEhPTqkGAezaFxJ8y/BL5m4vKTK3ZSpn+VcVLKnZo7Q=
+go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1/go.mod h1:GxUZM23m3u4vURw/At2zEKW+5GwcuCNsHJNT/Wq/cFI=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1 h1:qmdaBIz0UnUKVitZzq+4HtO9zvRTwgNc/Q3b7kyf1NQ=
+go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1/go.mod h1:nAckG/FkzAaPuwtEN2Na2+ij+2hdTjtXUtFBnlUqpFk=
+go.opentelemetry.io/collector/connector v0.102.1 h1:7lEwXmhzqtyZwz2bBUHzwV/CZqA8bhPPVJOi0cm9+Fk=
+go.opentelemetry.io/collector/connector v0.102.1/go.mod h1:DRlDYJXsFx1FKKxkdM2Ja52/xe+0bgmy0hA+wgKRUVI=
+go.opentelemetry.io/collector/consumer v0.102.1 h1:0CkgHhxwx4lI/m+hWjh607xyjooW5CObZ8hFQy5vvo0=
+go.opentelemetry.io/collector/consumer v0.102.1/go.mod h1:HoXqmrRV13jLnP3/Gg3fYNdRkDPoO7UW58hKiLyFF60=
+go.opentelemetry.io/collector/exporter v0.102.1 h1:4VURYgBNJscxfMhZWitzcwA1cig5a6pH0xZSpdECDnM=
+go.opentelemetry.io/collector/exporter v0.102.1/go.mod h1:1pmNxvrvvbWDW6PiGObICdj0eOSGV4Fzwpm5QA1GU54=
+go.opentelemetry.io/collector/exporter/loggingexporter v0.102.1 h1:LblufdV22DxB5NZa66CGCQZjadYTVxT+O5NR9YjNQ9Y=
+go.opentelemetry.io/collector/exporter/loggingexporter v0.102.1/go.mod h1:zmOEwiQlfvEHnakWNO1YFNubgWZvZee+5Wshuck5lZk=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1 h1:bOXE7u1iy0SKwH2mnVyIMKkvFIR9bn9iIm1Cf/CJlZU=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1/go.mod h1:4ya6xaUYvcXq9MQW0TbsR4QWkOJI02d/2Vt8plwdozA=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.102.1 h1:9TaxHrkVtEdssDAHqV5yU9PARkFph7CvfLqC1wS6m+c=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.102.1/go.mod h1:auKlkLfuUriyZ2CmV2dudJaVGB7ycZ+tTpypy2JNFEc=
+go.opentelemetry.io/collector/extension v0.102.1 h1:gAvE3w15q+Vv0Tj100jzcDpeMTyc8dAiemHRtJbspLg=
+go.opentelemetry.io/collector/extension v0.102.1/go.mod h1:XBxUOXjZpwYLZYOK5u3GWlbBTOKmzStY5eU1R/aXkIo=
+go.opentelemetry.io/collector/extension/auth v0.102.1 h1:GP6oBmpFJjxuVruPb9X40bdf6PNu9779i8anxa+wW6U=
+go.opentelemetry.io/collector/extension/auth v0.102.1/go.mod h1:U2JWz8AW1QXX2Ap3ofzo5Dn2fZU/Lglld97Vbh8BZS0=
+go.opentelemetry.io/collector/extension/ballastextension v0.102.1 h1:mrGwVheXTgnxdkWC9BXOO2Zk4oIO4mpfN5zzLI40bX4=
+go.opentelemetry.io/collector/extension/ballastextension v0.102.1/go.mod h1:UzWHLmeNg5wPTeUjgNE92JEJhSWX5S+1P0NCpyijPDA=
+go.opentelemetry.io/collector/extension/zpagesextension v0.102.1 h1:YV+ejCgOBJjACOi/l3ULeivOhh85FPE8T4UcFdWviyg=
+go.opentelemetry.io/collector/extension/zpagesextension v0.102.1/go.mod h1:/CZXg9/C64k85/k4bc7NFbCNP/MiPUZucbxPUN04ny4=
+go.opentelemetry.io/collector/featuregate v1.9.0 h1:mC4/HnR5cx/kkG1RKOQAvHxxg5Ktmd9gpFdttPEXQtA=
+go.opentelemetry.io/collector/featuregate v1.9.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U=
+go.opentelemetry.io/collector/filter v0.102.0 h1:2K4Q/l4b+tglMAQmxpscuCr/juyozyPx17Q6Dfm2FwU=
+go.opentelemetry.io/collector/filter v0.102.0/go.mod h1:zDVjFCeeVct7hYwejzx+aRC1dbHaPsvv/Ob1SvCiQjE=
+go.opentelemetry.io/collector/otelcol v0.102.1 h1:JdRG3ven+c5k703QpZG5bxJi4JJOnWaNP/EJvN+oYnI=
+go.opentelemetry.io/collector/otelcol v0.102.1/go.mod h1:kHf9KBXOLZXajR1On8XJbBBGcgh2I2+/mVVroPzOLJU=
+go.opentelemetry.io/collector/pdata v1.9.0 h1:qyXe3HEVYYxerIYu0rzgo1Tx2d1Zs6iF+TCckbHLFOw=
+go.opentelemetry.io/collector/pdata v1.9.0/go.mod h1:vk7LrfpyVpGZrRWcpjyy0DDZzL3SZiYMQxfap25551w=
+go.opentelemetry.io/collector/pdata/testdata v0.102.1 h1:S3idZaJxy8M7mCC4PG4EegmtiSaOuh6wXWatKIui8xU=
+go.opentelemetry.io/collector/pdata/testdata v0.102.1/go.mod h1:JEoSJTMgeTKyGxoMRy48RMYyhkA5vCCq/abJq9B6vXs=
+go.opentelemetry.io/collector/processor v0.102.1 h1:79NWs7kTgmgxOIQacuZyDf+mYWuoJZS07SHwZT7sZ4Y=
+go.opentelemetry.io/collector/processor v0.102.1/go.mod h1:sNM41tEHgv3YA/Dz9/6F8oCeObrqnKCGOMs7wS6Ldus=
+go.opentelemetry.io/collector/processor/batchprocessor v0.102.1 h1:s7TjD8k2d58x/Oj6P6PIm6R4zyBRdUPNbD9Zhiv0x0E=
+go.opentelemetry.io/collector/processor/batchprocessor v0.102.1/go.mod h1:RDgJIY8J6xstSncSDzvzkOSFoNGK8RqeuHfdoWxu6a8=
+go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.102.1 h1:aUDHYAMJFQR/NRTqerzJjHk4bbDLwReQnMQmMMyuYLo=
+go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.102.1/go.mod h1:u4QM5ntLlV+XIl0w5zEYa3qmjukGhtmjgqzrdG8QGus=
+go.opentelemetry.io/collector/receiver v0.102.1 h1:353t4U3o0RdU007JcQ4sRRzl72GHCJZwXDr8cCOcEbI=
+go.opentelemetry.io/collector/receiver v0.102.1/go.mod h1:pYjMzUkvUlxJ8xt+VbI1to8HMtVlv8AW/K/2GQQOTB0=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1 h1:65/8lkVmOu6gwBw99W+QUQBeDC2qVTwlaiqy7/SpauY=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1/go.mod h1:0hmxfFSSqKJjRGvgYjp/XvptbAgLhLguwNgJqMp7zd0=
+go.opentelemetry.io/collector/semconv v0.102.1 h1:zLhz2Gu//j7HHESFTGTrfKIaoS4r+lZFQDnGCOThggo=
+go.opentelemetry.io/collector/semconv v0.102.1/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
+go.opentelemetry.io/collector/service v0.102.1 h1:Lg7qrC4Zctd/OAlkpdsaZaUY+jLEGLLnOigfBLP2GW8=
+go.opentelemetry.io/collector/service v0.102.1/go.mod h1:L5Sh3461B1Zij7vpMMbi6M/SZicgrLB3UgbG0oUK0pA=
+go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs=
+go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
+go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0=
+go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E=
+go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E=
+go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
+go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs=
+go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
+go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ=
+go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
+go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
+go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI=
+go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
+go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
+go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI=
+go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU=
+go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU=
+go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
@@ -1300,9 +1487,10 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
-golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
-golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1313,8 +1501,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1340,18 +1528,20 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1359,6 +1549,7 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1385,17 +1576,18 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
-golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
-golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
+golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
+golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1409,12 +1601,13 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1425,6 +1618,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1481,6 +1675,7 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1488,19 +1683,21 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
-golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
-golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1514,22 +1711,24 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -1541,6 +1740,8 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1574,8 +1775,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
+golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1598,8 +1799,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.168.0 h1:MBRe+Ki4mMN93jhDDbpuRLjRddooArz4FeSObvUMmjY=
-google.golang.org/api v0.168.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg=
+google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY=
+google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1617,6 +1818,7 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
@@ -1640,16 +1842,18 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
-google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -1661,8 +1865,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
-google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1676,9 +1880,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1691,12 +1894,15 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1742,8 +1948,8 @@ k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iL
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak=
+k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/pkg/config/config_factory.go b/pkg/config/config_factory.go
index 80a37403d3..93e93ba570 100644
--- a/pkg/config/config_factory.go
+++ b/pkg/config/config_factory.go
@@ -43,26 +43,21 @@ func GetConfigProviderSettings(flags *flag.FlagSet) otelcol.ConfigProviderSettin
}
// generate the MapProviders for the Config Provider Settings
- providers := []confmap.Provider{
- fileprovider.NewWithSettings(confmap.ProviderSettings{}),
- envprovider.NewWithSettings(confmap.ProviderSettings{}),
- yamlprovider.NewWithSettings(confmap.ProviderSettings{}),
- httpprovider.NewWithSettings(confmap.ProviderSettings{}),
- httpsprovider.NewWithSettings(confmap.ProviderSettings{}),
- s3provider.New(),
- }
-
- mapProviders := make(map[string]confmap.Provider, len(providers))
- for _, provider := range providers {
- mapProviders[provider.Scheme()] = provider
+ providers := []confmap.ProviderFactory{
+ fileprovider.NewFactory(),
+ envprovider.NewFactory(),
+ yamlprovider.NewFactory(),
+ httpprovider.NewFactory(),
+ httpsprovider.NewFactory(),
+ s3provider.NewFactory(),
}
// create Config Provider Settings
configProviderSettings := otelcol.ConfigProviderSettings{
ResolverSettings: confmap.ResolverSettings{
- URIs: loc,
- Providers: mapProviders,
- Converters: []confmap.Converter{expandconverter.New(confmap.ConverterSettings{})},
+ URIs: loc,
+ ProviderFactories: providers,
+ ConverterFactories: []confmap.ConverterFactory{expandconverter.NewFactory()},
},
}
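
The hunk above swaps per-instance confmap providers for factory constructors. A minimal sketch of how such factory-based settings can be consumed follows; it is illustrative only (the URI, the file-only provider list, and the `go.opentelemetry.io` import paths are assumptions, not taken from this change), under the assumption that the upstream `otelcol.NewConfigProvider` API is used.

```go
package main

import (
	"log"

	"go.opentelemetry.io/collector/confmap"
	"go.opentelemetry.io/collector/confmap/converter/expandconverter"
	"go.opentelemetry.io/collector/confmap/provider/fileprovider"
	"go.opentelemetry.io/collector/otelcol"
)

func main() {
	// Factory-based resolver settings, mirroring the pattern in the hunk above.
	settings := otelcol.ConfigProviderSettings{
		ResolverSettings: confmap.ResolverSettings{
			URIs:               []string{"file:./config.yaml"}, // placeholder URI
			ProviderFactories:  []confmap.ProviderFactory{fileprovider.NewFactory()},
			ConverterFactories: []confmap.ConverterFactory{expandconverter.NewFactory()},
		},
	}
	provider, err := otelcol.NewConfigProvider(settings)
	if err != nil {
		log.Fatal(err)
	}
	_ = provider // a real collector would hand this to otelcol.CollectorSettings
}
```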
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index 06b957349a..967e060747 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,12 @@
# Changes
+## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15)
+
+
+### Features
+
+* **compute/metadata:** Add context aware functions ([#9733](https://github.com/googleapis/google-cloud-go/issues/9733)) ([e4eb5b4](https://github.com/googleapis/google-cloud-go/commit/e4eb5b46ee2aec9d2fc18300bfd66015e25a0510))
+
## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15)
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index c17faa142a..f67e3c7eea 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -23,7 +23,7 @@ import (
"context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"net/url"
@@ -95,9 +95,9 @@ func (c *cachedValue) get(cl *Client) (v string, err error) {
return c.v, nil
}
if c.trim {
- v, err = cl.getTrimmed(c.k)
+ v, err = cl.getTrimmed(context.Background(), c.k)
} else {
- v, err = cl.Get(c.k)
+ v, err = cl.GetWithContext(context.Background(), c.k)
}
if err == nil {
c.v = v
@@ -197,18 +197,32 @@ func systemInfoSuggestsGCE() bool {
// We don't have any non-Linux clues available, at least yet.
return false
}
- slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+ slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
-// Subscribe calls Client.Subscribe on the default client.
+// Subscribe calls Client.SubscribeWithContext on the default client.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
- return defaultClient.Subscribe(suffix, fn)
+ return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
}
-// Get calls Client.Get on the default client.
-func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
+// SubscribeWithContext calls Client.SubscribeWithContext on the default client.
+func SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error {
+ return defaultClient.SubscribeWithContext(ctx, suffix, fn)
+}
+
+// Get calls Client.GetWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [GetWithContext].
+func Get(suffix string) (string, error) {
+ return defaultClient.GetWithContext(context.Background(), suffix)
+}
+
+// GetWithContext calls Client.GetWithContext on the default client.
+func GetWithContext(ctx context.Context, suffix string) (string, error) {
+ return defaultClient.GetWithContext(ctx, suffix)
+}
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
@@ -288,8 +302,7 @@ func NewClient(c *http.Client) *Client {
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
-func (c *Client) getETag(suffix string) (value, etag string, err error) {
- ctx := context.TODO()
+func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
@@ -306,7 +319,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
}
suffix = strings.TrimLeft(suffix, "/")
u := "http://" + host + "/computeMetadata/v1/" + suffix
- req, err := http.NewRequest("GET", u, nil)
+ req, err := http.NewRequestWithContext(ctx, "GET", u, nil)
if err != nil {
return "", "", err
}
@@ -336,7 +349,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
- all, err := ioutil.ReadAll(res.Body)
+ all, err := io.ReadAll(res.Body)
if err != nil {
return "", "", err
}
@@ -354,19 +367,33 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
+//
+// Deprecated: Please use the context aware variant [Client.GetWithContext].
func (c *Client) Get(suffix string) (string, error) {
- val, _, err := c.getETag(suffix)
+ return c.GetWithContext(context.Background(), suffix)
+}
+
+// GetWithContext returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) {
+ val, _, err := c.getETag(ctx, suffix)
return val, err
}
-func (c *Client) getTrimmed(suffix string) (s string, err error) {
- s, err = c.Get(suffix)
+func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err error) {
+ s, err = c.GetWithContext(ctx, suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
- j, err := c.Get(suffix)
+ j, err := c.GetWithContext(context.Background(), suffix)
if err != nil {
return nil, err
}
@@ -388,7 +415,7 @@ func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
- return c.getTrimmed("instance/network-interfaces/0/ip")
+ return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
@@ -398,25 +425,25 @@ func (c *Client) Email(serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
- return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
+ return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
- return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+ return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
- return c.getTrimmed("instance/hostname")
+ return c.getTrimmed(context.Background(), "instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
- j, err := c.Get("instance/tags")
+ j, err := c.GetWithContext(context.Background(), "instance/tags")
if err != nil {
return nil, err
}
@@ -428,12 +455,12 @@ func (c *Client) InstanceTags() ([]string, error) {
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
- return c.getTrimmed("instance/name")
+ return c.getTrimmed(context.Background(), "instance/name")
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
- zone, err := c.getTrimmed("instance/zone")
+ zone, err := c.getTrimmed(context.Background(), "instance/zone")
	// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
@@ -460,7 +487,7 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
- return c.Get("instance/attributes/" + attr)
+ return c.GetWithContext(context.Background(), "instance/attributes/"+attr)
}
// ProjectAttributeValue returns the value of the provided
@@ -472,7 +499,7 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) {
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
- return c.Get("project/attributes/" + attr)
+ return c.GetWithContext(context.Background(), "project/attributes/"+attr)
}
// Scopes returns the service account scopes for the given account.
@@ -489,21 +516,30 @@ func (c *Client) Scopes(serviceAccount string) ([]string, error) {
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
-// Subscribe calls fn with the latest metadata value indicated by the provided
-// suffix. If the metadata value is deleted, fn is called with the empty string
-// and ok false. Subscribe blocks until fn returns a non-nil error or the value
-// is deleted. Subscribe returns the error value returned from the last call to
-// fn, which may be nil when ok == false.
+// Deprecated: Please use the context aware variant [Client.SubscribeWithContext].
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
+ return c.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
+}
+
+// SubscribeWithContext subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// SubscribeWithContext calls fn with the latest metadata value indicated by the
+// provided suffix. If the metadata value is deleted, fn is called with the
+// empty string and ok false. Subscribe blocks until fn returns a non-nil error
+// or the value is deleted. Subscribe returns the error value returned from the
+// last call to fn, which may be nil when ok == false.
+func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
- val, lastETag, err := c.getETag(suffix)
+ val, lastETag, err := c.getETag(ctx, suffix)
if err != nil {
return err
}
- if err := fn(val, true); err != nil {
+ if err := fn(ctx, val, true); err != nil {
return err
}
@@ -514,7 +550,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro
suffix += "?wait_for_change=true&last_etag="
}
for {
- val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
+ val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
@@ -524,7 +560,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro
}
lastETag = etag
- if err := fn(val, ok); err != nil || !ok {
+ if err := fn(ctx, val, ok); err != nil || !ok {
return err
}
}
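
The metadata.go changes above deprecate the context-free accessors in favor of context-aware variants. A short sketch (not part of this patch) of calling the new package-level helper with a timeout; the metadata suffix shown is just an example value.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// GetWithContext is the context-aware replacement for the deprecated Get;
	// the request is cancelled if the context deadline expires.
	id, err := metadata.GetWithContext(ctx, "project/project-id")
	if err != nil {
		fmt.Println("metadata lookup failed:", err)
		return
	}
	fmt.Println("project:", id)
}
```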
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go
index 0f18f3cda1..3d4bc75ddf 100644
--- a/vendor/cloud.google.com/go/compute/metadata/retry.go
+++ b/vendor/cloud.google.com/go/compute/metadata/retry.go
@@ -27,7 +27,7 @@ const (
)
var (
- syscallRetryable = func(err error) bool { return false }
+ syscallRetryable = func(error) bool { return false }
)
// defaultBackoff is basically equivalent to gax.Backoff without the need for
diff --git a/vendor/github.com/DataDog/agent-payload/v5/pb/agent_logs_payload.pb.go b/vendor/github.com/DataDog/agent-payload/v5/pb/agent_logs_payload.pb.go
new file mode 100644
index 0000000000..6ffb438ab9
--- /dev/null
+++ b/vendor/github.com/DataDog/agent-payload/v5/pb/agent_logs_payload.pb.go
@@ -0,0 +1,631 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: proto/logs/agent_logs_payload.proto
+
+package pb
+
+import (
+ fmt "fmt"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+type Log struct {
+ Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
+ Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+ Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // from host
+ Hostname string `protobuf:"bytes,4,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ // from config
+ Service string `protobuf:"bytes,5,opt,name=service,proto3" json:"service,omitempty"`
+ Source string `protobuf:"bytes,6,opt,name=source,proto3" json:"source,omitempty"`
+ // from config, container tags, ...
+ Tags []string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Log) Reset() { *m = Log{} }
+func (m *Log) String() string { return proto.CompactTextString(m) }
+func (*Log) ProtoMessage() {}
+func (*Log) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ad4736ffde8ea121, []int{0}
+}
+func (m *Log) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Log) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Log.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Log) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Log.Merge(m, src)
+}
+func (m *Log) XXX_Size() int {
+ return m.Size()
+}
+func (m *Log) XXX_DiscardUnknown() {
+ xxx_messageInfo_Log.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Log proto.InternalMessageInfo
+
+func (m *Log) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func (m *Log) GetStatus() string {
+ if m != nil {
+ return m.Status
+ }
+ return ""
+}
+
+func (m *Log) GetTimestamp() int64 {
+ if m != nil {
+ return m.Timestamp
+ }
+ return 0
+}
+
+func (m *Log) GetHostname() string {
+ if m != nil {
+ return m.Hostname
+ }
+ return ""
+}
+
+func (m *Log) GetService() string {
+ if m != nil {
+ return m.Service
+ }
+ return ""
+}
+
+func (m *Log) GetSource() string {
+ if m != nil {
+ return m.Source
+ }
+ return ""
+}
+
+func (m *Log) GetTags() []string {
+ if m != nil {
+ return m.Tags
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Log)(nil), "pb.Log")
+}
+
+func init() {
+ proto.RegisterFile("proto/logs/agent_logs_payload.proto", fileDescriptor_ad4736ffde8ea121)
+}
+
+var fileDescriptor_ad4736ffde8ea121 = []byte{
+ // 257 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0x41, 0x4a, 0xc4, 0x30,
+ 0x14, 0x86, 0xc9, 0x74, 0xec, 0xd8, 0x20, 0x08, 0x59, 0x48, 0x10, 0x29, 0x45, 0x41, 0xba, 0xb1,
+ 0x5d, 0x88, 0x07, 0x70, 0x98, 0xa5, 0x8b, 0xa1, 0xe0, 0xc6, 0xcd, 0xf0, 0xd2, 0x86, 0x4c, 0x61,
+ 0x32, 0x2f, 0x34, 0xaf, 0x03, 0x9e, 0xcc, 0x2b, 0xb8, 0xf4, 0x08, 0xd2, 0x93, 0x48, 0xd3, 0xaa,
+ 0xbb, 0xff, 0xcb, 0x23, 0x7c, 0xfc, 0x3f, 0xbf, 0x73, 0x1d, 0x12, 0x96, 0x07, 0x34, 0xbe, 0x04,
+ 0xa3, 0x8f, 0xb4, 0x1b, 0xe3, 0xce, 0xc1, 0xfb, 0x01, 0xa1, 0x29, 0xc2, 0x55, 0x2c, 0x9c, 0xba,
+ 0xfd, 0x60, 0x3c, 0x7a, 0x41, 0x23, 0x24, 0x5f, 0x59, 0xed, 0x3d, 0x18, 0x2d, 0x59, 0xc6, 0xf2,
+ 0xa4, 0xfa, 0x45, 0x71, 0xc5, 0x63, 0x4f, 0x40, 0xbd, 0x97, 0x8b, 0x70, 0x98, 0x49, 0xdc, 0xf0,
+ 0x84, 0x5a, 0xab, 0x3d, 0x81, 0x75, 0x32, 0xca, 0x58, 0x1e, 0x55, 0xff, 0x0f, 0xe2, 0x9a, 0x9f,
+ 0xef, 0xd1, 0xd3, 0x11, 0xac, 0x96, 0xcb, 0xf0, 0xef, 0x8f, 0x47, 0x97, 0xd7, 0xdd, 0xa9, 0xad,
+ 0xb5, 0x3c, 0x9b, 0x5c, 0x33, 0x06, 0x17, 0xf6, 0x5d, 0xad, 0x65, 0x3c, 0xbb, 0x02, 0x09, 0xc1,
+ 0x97, 0x04, 0xc6, 0xcb, 0x55, 0x16, 0xe5, 0x49, 0x15, 0xf2, 0xfa, 0xf5, 0x73, 0x48, 0xd9, 0xd7,
+ 0x90, 0xb2, 0xef, 0x21, 0x65, 0xfc, 0xb2, 0x46, 0x5b, 0x34, 0x4d, 0x11, 0xca, 0x16, 0x4e, 0xad,
+ 0x2f, 0x9e, 0xc7, 0xb4, 0x9d, 0x0a, 0x6f, 0xd9, 0xdb, 0xbd, 0x69, 0x69, 0xdf, 0xab, 0xa2, 0x46,
+ 0x5b, 0x6e, 0x80, 0x60, 0x83, 0x66, 0x5a, 0xe6, 0x61, 0x1e, 0xa5, 0x3c, 0x3d, 0x95, 0x4e, 0xa9,
+ 0x38, 0x6c, 0xf3, 0xf8, 0x13, 0x00, 0x00, 0xff, 0xff, 0x7c, 0x8b, 0xab, 0x22, 0x42, 0x01, 0x00,
+ 0x00,
+}
+
+func (m *Log) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Log) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Log) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Tags) > 0 {
+ for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Tags[iNdEx])
+ copy(dAtA[i:], m.Tags[iNdEx])
+ i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Tags[iNdEx])))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.Source) > 0 {
+ i -= len(m.Source)
+ copy(dAtA[i:], m.Source)
+ i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Source)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.Service) > 0 {
+ i -= len(m.Service)
+ copy(dAtA[i:], m.Service)
+ i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Service)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.Hostname) > 0 {
+ i -= len(m.Hostname)
+ copy(dAtA[i:], m.Hostname)
+ i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Hostname)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Timestamp != 0 {
+ i = encodeVarintAgentLogsPayload(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Status) > 0 {
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Message) > 0 {
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintAgentLogsPayload(dAtA []byte, offset int, v uint64) int {
+ offset -= sovAgentLogsPayload(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Log) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Message)
+ if l > 0 {
+ n += 1 + l + sovAgentLogsPayload(uint64(l))
+ }
+ l = len(m.Status)
+ if l > 0 {
+ n += 1 + l + sovAgentLogsPayload(uint64(l))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovAgentLogsPayload(uint64(m.Timestamp))
+ }
+ l = len(m.Hostname)
+ if l > 0 {
+ n += 1 + l + sovAgentLogsPayload(uint64(l))
+ }
+ l = len(m.Service)
+ if l > 0 {
+ n += 1 + l + sovAgentLogsPayload(uint64(l))
+ }
+ l = len(m.Source)
+ if l > 0 {
+ n += 1 + l + sovAgentLogsPayload(uint64(l))
+ }
+ if len(m.Tags) > 0 {
+ for _, s := range m.Tags {
+ l = len(s)
+ n += 1 + l + sovAgentLogsPayload(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovAgentLogsPayload(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozAgentLogsPayload(x uint64) (n int) {
+ return sovAgentLogsPayload(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Log) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Log: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Log: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hostname = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Service = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Source = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAgentLogsPayload(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAgentLogsPayload
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipAgentLogsPayload(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAgentLogsPayload
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthAgentLogsPayload
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupAgentLogsPayload
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthAgentLogsPayload
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthAgentLogsPayload = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowAgentLogsPayload = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupAgentLogsPayload = fmt.Errorf("proto: unexpected end of group")
+)
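
The generated `pb.Log` type above carries gogo-style `Marshal`/`Unmarshal` methods. A hypothetical round trip, purely for illustration: the field values are made up, and the import path simply follows the vendored path shown in this diff.

```go
package main

import (
	"fmt"

	pb "github.com/DataDog/agent-payload/v5/pb"
)

func main() {
	in := &pb.Log{
		Message:   "request served",
		Status:    "info",
		Timestamp: 1716200000,
		Hostname:  "web-1",
		Service:   "frontend",
		Source:    "stdout",
		Tags:      []string{"env:dev"},
	}

	// Encode to the wire format, then decode into a fresh struct.
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	out := &pb.Log{}
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.GetMessage(), out.GetTags())
}
```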
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/core/config/LICENSE
similarity index 99%
rename from vendor/google.golang.org/appengine/LICENSE
rename to vendor/github.com/DataDog/datadog-agent/comp/core/config/LICENSE
index d645695673..b370545be1 100644
--- a/vendor/google.golang.org/appengine/LICENSE
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/LICENSE
@@ -1,4 +1,3 @@
-
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -179,7 +178,7 @@
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
+ boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
@@ -187,8 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright [yyyy] [name of copyright owner]
-
+ Copyright 2016-present Datadog, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/component.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/component.go
new file mode 100644
index 0000000000..43a0d8f202
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/component.go
@@ -0,0 +1,42 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package config implements a component to handle agent configuration. This
+// component temporarily wraps pkg/config.
+//
+// This component initializes pkg/config based on the bundle params, and
+// will return the same results as that package. This is to support migration
+// to a component architecture. When no code still uses pkg/config, that
+// package will be removed.
+//
+// The mock component does nothing at startup, beginning with an empty config.
+// It also overwrites the pkg/config.Datadog for the duration of the test.
+package config
+
+import (
+ "go.uber.org/fx"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+)
+
+// team: agent-shared-components
+
+// LogConfig reads the logger config
+type LogConfig pkgconfigmodel.Reader
+
+// Component is the component type.
+type Component interface {
+ pkgconfigmodel.ReaderWriter
+
+ // Warnings returns config warnings collected during setup.
+ Warnings() *pkgconfigmodel.Warnings
+}
+
+// Module defines the fx options for this component.
+func Module() fxutil.Module {
+ return fxutil.Component(
+ fx.Provide(newConfig))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/component_mock.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/component_mock.go
new file mode 100644
index 0000000000..37fae0382a
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/component_mock.go
@@ -0,0 +1,38 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package config implements a component to handle agent configuration. This
+// component temporarily wraps pkg/config.
+//
+// This component initializes pkg/config based on the bundle params, and
+// will return the same results as that package. This is to support migration
+// to a component architecture. When no code still uses pkg/config, that
+// package will be removed.
+//
+// The mock component does nothing at startup, beginning with an empty config.
+// It also overwrites the pkg/config.Datadog for the duration of the test.
+
+//go:build test
+// +build test
+
+package config
+
+import (
+ "go.uber.org/fx"
+
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+)
+
+// Mock implements mock-specific methods.
+type Mock interface {
+ Component
+}
+
+// MockModule defines the fx options for the mock component.
+func MockModule() fxutil.Module {
+ return fxutil.Component(
+ fx.Provide(newMock),
+ fx.Supply(MockParams{}))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/config.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/config.go
new file mode 100644
index 0000000000..72c47562e1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/config.go
@@ -0,0 +1,101 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import (
+ "os"
+ "strings"
+
+ "go.uber.org/fx"
+
+ "github.com/DataDog/datadog-agent/comp/core/secrets"
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+ "github.com/DataDog/datadog-agent/pkg/util/optional"
+)
+
+// Reader is a subset of Config that only allows reading of configuration
+type Reader = pkgconfigmodel.Reader //nolint:revive
+
+// cfg implements the Component.
+type cfg struct {
+ // this component is currently implementing a thin wrapper around pkg/config,
+ // and uses globals in that package.
+ pkgconfigmodel.Config
+
+ // warnings are the warnings generated during setup
+ warnings *pkgconfigmodel.Warnings
+}
+
+// configDependencies is an interface that mimics the fx-oriented dependencies struct
+// TODO: (components) investigate whether this interface is worth keeping, otherwise delete it and just use dependencies
+type configDependencies interface {
+ getParams() *Params
+ getSecretResolver() (secrets.Component, bool)
+}
+
+type dependencies struct {
+ fx.In
+
+ Params Params
+ Secret optional.Option[secrets.Component]
+}
+
+func (d dependencies) getParams() *Params {
+ return &d.Params
+}
+
+func (d dependencies) getSecretResolver() (secrets.Component, bool) {
+ return d.Secret.Get()
+}
+
+// NewServerlessConfig initializes a config component from the given config file
+// TODO: serverless must be eventually migrated to fx, this workaround will then become obsolete - ts should not be created directly in this fashion.
+func NewServerlessConfig(path string) (Component, error) {
+ options := []func(*Params){WithConfigName("serverless")}
+
+ _, err := os.Stat(path)
+ if os.IsNotExist(err) &&
+ (strings.HasSuffix(path, ".yaml") || strings.HasSuffix(path, ".yml")) {
+ options = append(options, WithConfigMissingOK(true))
+ } else if !os.IsNotExist(err) {
+ options = append(options, WithConfFilePath(path))
+ }
+
+ d := dependencies{Params: NewParams(path, options...)}
+ return newConfig(d)
+}
+
+func newConfig(deps dependencies) (Component, error) {
+ config := pkgconfigsetup.Datadog
+ warnings, err := setupConfig(config, deps)
+ returnErrFct := func(e error) (Component, error) {
+ if e != nil && deps.Params.ignoreErrors {
+ if warnings == nil {
+ warnings = &pkgconfigmodel.Warnings{}
+ }
+ warnings.Err = e
+ e = nil
+ }
+ return &cfg{Config: config, warnings: warnings}, e
+ }
+
+ if err != nil {
+ return returnErrFct(err)
+ }
+
+ if deps.Params.configLoadSecurityAgent {
+ if err := pkgconfigsetup.Merge(deps.Params.securityAgentConfigFilePaths, config); err != nil {
+ return returnErrFct(err)
+ }
+ }
+
+ return &cfg{Config: config, warnings: warnings}, nil
+}
+
+func (c *cfg) Warnings() *pkgconfigmodel.Warnings {
+ return c.warnings
+}
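
`NewServerlessConfig` above builds the config component without fx. A brief usage sketch, outside the patch itself; the path is a placeholder and error handling is intentionally minimal.

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/comp/core/config"
)

func main() {
	// A missing .yaml/.yml path is tolerated via WithConfigMissingOK, per the code above.
	comp, err := config.NewServerlessConfig("/var/task/datadog.yaml")
	if err != nil {
		fmt.Println("config setup failed:", err)
		return
	}
	if w := comp.Warnings(); w != nil && w.Err != nil {
		fmt.Println("ignored setup error:", w.Err)
	}
}
```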
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/config_mock.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/config_mock.go
new file mode 100644
index 0000000000..5a5a32e2c9
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/config_mock.go
@@ -0,0 +1,77 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+// +build test
+
+package config
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/DataDog/datadog-agent/comp/core/secrets"
+ "github.com/DataDog/datadog-agent/pkg/config/env"
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+ "go.uber.org/fx"
+)
+
+type mockDependencies struct {
+ fx.In
+
+ Params MockParams
+}
+
+func (m mockDependencies) getParams() *Params {
+ p := m.Params.Params
+ return &p
+}
+
+func (m mockDependencies) getSecretResolver() (secrets.Component, bool) {
+ return nil, false
+}
+
+// newMock exported mock builder to allow modifying mocks that might be
+// supplied in tests and used for dep injection.
+func newMock(deps mockDependencies, t testing.TB) (Component, error) {
+ backupConfig := pkgconfigmodel.NewConfig("", "", strings.NewReplacer())
+ backupConfig.CopyConfig(pkgconfigsetup.Datadog)
+
+ pkgconfigsetup.Datadog.CopyConfig(pkgconfigmodel.NewConfig("mock", "XXXX", strings.NewReplacer()))
+
+ env.SetFeatures(t, deps.Params.Features...)
+
+ // call InitConfig to set defaults.
+ pkgconfigsetup.InitConfig(pkgconfigsetup.Datadog)
+ c := &cfg{
+ Config: pkgconfigsetup.Datadog,
+ }
+
+ if !deps.Params.SetupConfig {
+ if deps.Params.ConfFilePath != "" {
+ pkgconfigsetup.Datadog.SetConfigType("yaml")
+ err := pkgconfigsetup.Datadog.ReadConfig(strings.NewReader(deps.Params.ConfFilePath))
+ if err != nil {
+ // The YAML was invalid, fail initialization of the mock config.
+ return nil, err
+ }
+ }
+ } else {
+ warnings, _ := setupConfig(pkgconfigsetup.Datadog, deps)
+ c.warnings = warnings
+ }
+
+ // Overrides are explicit and will take precedence over any other
+ // setting
+ for k, v := range deps.Params.Overrides {
+ pkgconfigsetup.Datadog.SetWithoutSource(k, v)
+ }
+
+ // swap the existing config back at the end of the test.
+ t.Cleanup(func() { pkgconfigsetup.Datadog.CopyConfig(backupConfig) })
+
+ return c, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/params.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params.go
new file mode 100644
index 0000000000..2ec66cd852
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params.go
@@ -0,0 +1,129 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+// Params defines the parameters for the config component.
+type Params struct {
+ // ConfFilePath is the path at which to look for configuration, usually
+ // given by the --cfgpath command-line flag.
+ ConfFilePath string
+
+ // configName is the root of the name of the configuration file. The
+ // comp/core/config component will search for a file with this name
+ // in ConfFilePath, using a variety of extensions. The default is
+ // "datadog".
+ configName string
+
+	// securityAgentConfigFilePaths are the paths at which to look for security-agent
+ // configuration, usually given by the --cfgpath command-line flag.
+ securityAgentConfigFilePaths []string
+
+ // configLoadSecurityAgent determines whether to read the config from
+ // SecurityAgentConfigFilePaths or from ConfFilePath.
+ configLoadSecurityAgent bool
+
+ // configMissingOK determines whether it is a fatal error if the config
+ // file does not exist.
+ configMissingOK bool
+
+ // ignoreErrors determines whether it is OK if the config is not valid
+ // If an error occurs, Component.warnings.Warning contains the error.
+ ignoreErrors bool
+
+ // defaultConfPath determines the default configuration path.
+ // if defaultConfPath is empty, then no default configuration path is used.
+ defaultConfPath string
+}
+
+// NewParams creates a new instance of Params
+func NewParams(defaultConfPath string, options ...func(*Params)) Params {
+ params := Params{
+ defaultConfPath: defaultConfPath,
+ }
+ for _, o := range options {
+		o(&params)
+ }
+ return params
+}
+
+// NewAgentParams creates a new instance of Params for the Agent.
+func NewAgentParams(confFilePath string, options ...func(*Params)) Params {
+ params := NewParams(DefaultConfPath, options...)
+ params.ConfFilePath = confFilePath
+ return params
+}
+
+// NewSecurityAgentParams creates a new instance of Params for the Security Agent.
+func NewSecurityAgentParams(securityAgentConfigFilePaths []string, options ...func(*Params)) Params {
+ params := NewParams(DefaultConfPath, options...)
+
+ // By default, we load datadog.yaml and then merge security-agent.yaml
+ if len(securityAgentConfigFilePaths) > 0 {
+ params.ConfFilePath = securityAgentConfigFilePaths[0] // Default: datadog.yaml
+ params.securityAgentConfigFilePaths = securityAgentConfigFilePaths[1:] // Default: security-agent.yaml
+ }
+ params.configLoadSecurityAgent = true
+ params.configMissingOK = false
+ return params
+}
+
+// NewClusterAgentParams returns a new Params struct for the cluster agent
+func NewClusterAgentParams(configFilePath string, options ...func(*Params)) Params {
+ params := NewParams(DefaultConfPath, options...)
+ params.ConfFilePath = configFilePath
+ params.configName = "datadog-cluster"
+ return params
+}
+
+// WithConfigName returns an option which sets the config name
+func WithConfigName(name string) func(*Params) {
+ return func(b *Params) {
+ b.configName = name
+ }
+}
+
+// WithConfigMissingOK returns an option which sets configMissingOK
+func WithConfigMissingOK(v bool) func(*Params) {
+ return func(b *Params) {
+ b.configMissingOK = v
+ }
+}
+
+// WithIgnoreErrors returns an option which sets ignoreErrors
+func WithIgnoreErrors(v bool) func(*Params) {
+ return func(b *Params) {
+ b.ignoreErrors = v
+ }
+}
+
+// WithSecurityAgentConfigFilePaths returns an option which sets securityAgentConfigFilePaths
+func WithSecurityAgentConfigFilePaths(securityAgentConfigFilePaths []string) func(*Params) {
+ return func(b *Params) {
+ b.securityAgentConfigFilePaths = securityAgentConfigFilePaths
+ }
+}
+
+// WithConfigLoadSecurityAgent returns an option which sets configLoadSecurityAgent
+func WithConfigLoadSecurityAgent(configLoadSecurityAgent bool) func(*Params) {
+ return func(b *Params) {
+ b.configLoadSecurityAgent = configLoadSecurityAgent
+ }
+}
+
+// WithConfFilePath returns an option which sets ConfFilePath
+func WithConfFilePath(confFilePath string) func(*Params) {
+ return func(b *Params) {
+ b.ConfFilePath = confFilePath
+ }
+}
+
+// These functions are used in unit tests.
+
+// ConfigMissingOK determines whether it is a fatal error if the config
+// file does not exist.
+func (p Params) ConfigMissingOK() bool {
+ return p.configMissingOK
+}
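
`Params` is assembled through the functional options defined above. A short usage sketch (not part of the patch); the file path and option choice are illustrative.

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/comp/core/config"
)

func main() {
	// Typical agent startup: pass the --cfgpath value and tolerate a missing file.
	p := config.NewAgentParams(
		"/etc/datadog-agent/datadog.yaml",
		config.WithConfigMissingOK(true),
	)
	fmt.Println("missing config tolerated:", p.ConfigMissingOK())
}
```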
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_darwin.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_darwin.go
new file mode 100644
index 0000000000..d6525f2be5
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_darwin.go
@@ -0,0 +1,11 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+const (
+ // DefaultConfPath points to the folder containing datadog.yaml
+ DefaultConfPath = "/opt/datadog-agent/etc"
+)
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_freebsd.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_freebsd.go
new file mode 100644
index 0000000000..346133db35
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_freebsd.go
@@ -0,0 +1,11 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+const (
+ // DefaultConfPath points to the folder containing datadog.yaml
+ DefaultConfPath = "/usr/local/etc/datadog-agent"
+)
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_mock.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_mock.go
new file mode 100644
index 0000000000..a918de8fbd
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_mock.go
@@ -0,0 +1,31 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+// +build test
+
+package config
+
+import (
+ pkgconfigenv "github.com/DataDog/datadog-agent/pkg/config/env"
+)
+
+// MockParams defines the parameter for the mock config.
+// It is designed to be used with `fx.Replace` which replaces the default
+// empty value of `MockParams`.
+//
+// fx.Replace(configComponent.MockParams{Overrides: overrides})
+type MockParams struct {
+ Params
+
+ // Overrides is a parameter used to override values of the config
+ Overrides map[string]interface{}
+
+ // Features is a parameter to set features for the mock config
+ Features []pkgconfigenv.Feature
+
+ // SetupConfig sets up the config as if it weren't a mock; essentially a full init
+ SetupConfig bool
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_nix.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_nix.go
new file mode 100644
index 0000000000..8258b2ba66
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_nix.go
@@ -0,0 +1,13 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build netbsd || openbsd || solaris || dragonfly || linux
+
+package config
+
+const (
+ // DefaultConfPath points to the folder containing datadog.yaml
+ DefaultConfPath = "/etc/datadog-agent"
+)
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_windows.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_windows.go
new file mode 100644
index 0000000000..da7630c2c4
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/params_windows.go
@@ -0,0 +1,20 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import "github.com/DataDog/datadog-agent/pkg/util/winutil"
+
+var (
+ // DefaultConfPath points to the folder containing datadog.yaml
+ DefaultConfPath = "c:\\programdata\\datadog"
+)
+
+func init() {
+ pd, err := winutil.GetProgramDataDir()
+ if err == nil {
+ DefaultConfPath = pd
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/config/setup.go b/vendor/github.com/DataDog/datadog-agent/comp/core/config/setup.go
new file mode 100644
index 0000000000..796990ef03
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/config/setup.go
@@ -0,0 +1,72 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "runtime"
+ "strings"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+ "github.com/DataDog/viper"
+)
+
+// setupConfig is copied from cmd/agent/common/helpers.go.
+func setupConfig(config pkgconfigmodel.Config, deps configDependencies) (*pkgconfigmodel.Warnings, error) {
+ p := deps.getParams()
+
+ confFilePath := p.ConfFilePath
+ configName := p.configName
+ failOnMissingFile := !p.configMissingOK
+ defaultConfPath := p.defaultConfPath
+
+ if configName != "" {
+ config.SetConfigName(configName)
+ }
+
+ // set the paths where a config file is expected
+ if len(confFilePath) != 0 {
+ // if the configuration file path was supplied on the command line,
+ // add that first so it's first in line
+ config.AddConfigPath(confFilePath)
+ // If they set a config file directly, let's try to honor that
+ if strings.HasSuffix(confFilePath, ".yaml") || strings.HasSuffix(confFilePath, ".yml") {
+ config.SetConfigFile(confFilePath)
+ }
+ }
+ if defaultConfPath != "" {
+ config.AddConfigPath(defaultConfPath)
+ }
+
+ // load the configuration
+ var err error
+ var warnings *pkgconfigmodel.Warnings
+ if resolver, ok := deps.getSecretResolver(); ok {
+ warnings, err = pkgconfigsetup.LoadWithSecret(config, resolver, pkgconfigsetup.SystemProbe.GetEnvVars())
+ } else {
+ warnings, err = pkgconfigsetup.LoadWithoutSecret(config, pkgconfigsetup.SystemProbe.GetEnvVars())
+ }
+
+ // If `!failOnMissingFile`, do not issue an error if we cannot find the default config file.
+ var e viper.ConfigFileNotFoundError
+ if err != nil && (failOnMissingFile || !errors.As(err, &e) || confFilePath != "") {
+ // special-case permission-denied with a clearer error message
+ if errors.Is(err, fs.ErrPermission) {
+ if runtime.GOOS == "windows" {
+ err = fmt.Errorf(`cannot access the Datadog config file (%w); try running the command in an Administrator shell`, err)
+ } else {
+ err = fmt.Errorf("cannot access the Datadog config file (%w); try running the command under the same user as the Datadog Agent", err)
+ }
+ } else {
+ err = fmt.Errorf("unable to load Datadog config file: %w", err)
+ }
+ return warnings, err
+ }
+ return warnings, nil
+}
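
setupConfig above relies on viper-style resolution: an explicit .yaml/.yml path pins the file via SetConfigFile, otherwise the lookup walks the paths registered with AddConfigPath, and a missing default file is tolerated when no explicit path was given. Below is a rough standalone sketch of that resolution order; it uses upstream github.com/spf13/viper rather than the DataDog fork, and loadConfig plus the "datadog" config name are assumptions for illustration.

    package main

    import (
        "errors"
        "fmt"
        "strings"

        "github.com/spf13/viper"
    )

    // loadConfig mirrors the path-resolution order used by setupConfig above.
    func loadConfig(confFilePath, defaultConfPath string) error {
        v := viper.New()
        v.SetConfigName("datadog") // datadog.yaml / datadog.yml

        if confFilePath != "" {
            // A path supplied on the command line is searched first.
            v.AddConfigPath(confFilePath)
            // If it points directly at a YAML file, honor it as-is.
            if strings.HasSuffix(confFilePath, ".yaml") || strings.HasSuffix(confFilePath, ".yml") {
                v.SetConfigFile(confFilePath)
            }
        }
        if defaultConfPath != "" {
            v.AddConfigPath(defaultConfPath)
        }

        err := v.ReadInConfig()
        var notFound viper.ConfigFileNotFoundError
        if errors.As(err, &notFound) && confFilePath == "" {
            // A missing default config file is not fatal when no explicit path was given.
            return nil
        }
        return err
    }

    func main() {
        fmt.Println(loadConfig("", "/etc/datadog-agent"))
    }
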
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/component.go b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/component.go
new file mode 100644
index 0000000000..afbc02b366
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/component.go
@@ -0,0 +1,29 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package hostnameinterface describes the interface for hostname methods
+package hostnameinterface
+
+import (
+ "context"
+)
+
+// team: agent-shared-components
+
+// Data contains hostname and the hostname provider
+type Data struct {
+ Hostname string
+ Provider string
+}
+
+// Component is the type for hostname methods.
+type Component interface {
+ // Get returns the host name for the agent.
+ Get(context.Context) (string, error)
+ // GetWithProvider returns the hostname for the Agent and the provider that was used to retrieve it.
+ GetWithProvider(ctx context.Context) (Data, error)
+ // GetSafe is Get(), but it returns 'unknown host' if anything goes wrong.
+ GetSafe(context.Context) string
+}
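
As a rough illustration of how a Component implementation behaves (in particular GetSafe falling back to "unknown host"), here is a self-contained sketch. The Data and Component types are restated locally, and the osHostname provider is hypothetical; the real component resolves the hostname through the agent's provider chain.

    package main

    import (
        "context"
        "fmt"
        "os"
    )

    // Data and Component restate the interface above so the sketch is self-contained.
    type Data struct {
        Hostname string
        Provider string
    }

    type Component interface {
        Get(context.Context) (string, error)
        GetWithProvider(ctx context.Context) (Data, error)
        GetSafe(context.Context) string
    }

    // osHostname is a toy provider backed by os.Hostname.
    type osHostname struct{}

    func (osHostname) Get(context.Context) (string, error) { return os.Hostname() }

    func (h osHostname) GetWithProvider(ctx context.Context) (Data, error) {
        name, err := h.Get(ctx)
        return Data{Hostname: name, Provider: "os"}, err
    }

    func (h osHostname) GetSafe(ctx context.Context) string {
        name, err := h.Get(ctx)
        if err != nil {
            return "unknown host"
        }
        return name
    }

    func main() {
        var hn Component = osHostname{}
        fmt.Println(hn.GetSafe(context.Background()))
    }
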
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/component_mock.go b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/component_mock.go
new file mode 100644
index 0000000000..1a7ee483a4
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/component_mock.go
@@ -0,0 +1,14 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build test
+
+package hostnameinterface
+
+// Mock implements mock-specific methods.
+type Mock interface {
+ // Component methods are included in Mock.
+ Component
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/data.go b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/data.go
new file mode 100644
index 0000000000..87aacb51ea
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/data.go
@@ -0,0 +1,26 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !serverless
+
+package hostnameinterface
+
+const (
+ // ConfigProvider is the default provider value from the configuration file
+ ConfigProvider = "configuration"
+
+ // FargateProvider is the default provider value from Fargate
+ FargateProvider = "fargate"
+)
+
+// FromConfiguration returns true if the hostname was found through the configuration file
+func (h Data) FromConfiguration() bool {
+ return h.Provider == ConfigProvider
+}
+
+// FromFargate returns true if the hostname was found through Fargate
+func (h Data) FromFargate() bool {
+ return h.Provider == FargateProvider
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/data_serverless.go b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/data_serverless.go
new file mode 100644
index 0000000000..c16d1463f2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/data_serverless.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build serverless
+
+package hostnameinterface
+
+// FromConfiguration returns false for serverless
+func (h Data) FromConfiguration() bool {
+ return false
+}
+
+// FromFargate returns false for serverless
+func (h Data) FromFargate() bool {
+ return false
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/service_mock.go b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/service_mock.go
new file mode 100644
index 0000000000..70551f13e8
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface/service_mock.go
@@ -0,0 +1,63 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build test
+
+package hostnameinterface
+
+import (
+ "context"
+
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+
+ "go.uber.org/fx"
+)
+
+// MockModule defines the fx options for the mock component.
+// Injecting MockModule will provide the hostname 'my-hostname';
+// override this with fx.Replace(hostname.MockHostname("whatever")).
+func MockModule() fxutil.Module {
+ return fxutil.Component(
+ fx.Provide(
+ NewMock,
+ ),
+ fx.Supply(MockHostname("my-hostname")))
+}
+
+type mockService struct {
+ name string
+}
+
+var _ Mock = (*mockService)(nil)
+
+func (m *mockService) Get(_ context.Context) (string, error) {
+ return m.name, nil
+}
+
+func (m *mockService) GetSafe(_ context.Context) string {
+ return m.name
+}
+
+func (m *mockService) Set(name string) {
+ m.name = name
+}
+
+// GetWithProvider returns the hostname for the Agent and the provider that was used to retrieve it.
+func (m *mockService) GetWithProvider(_ context.Context) (Data, error) {
+ return Data{
+ Hostname: m.name,
+ Provider: "mockService",
+ }, nil
+}
+
+// MockHostname is an alias for injecting a mock hostname.
+// Usage: fx.Replace(hostname.MockHostname("whatever"))
+type MockHostname string
+
+// NewMock returns a new instance of the mock for the component hostname
+func NewMock(name MockHostname) (Component, Mock) {
+ mock := &mockService{string(name)}
+ return mock, mock
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/log/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/core/log/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/log/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/log/component.go b/vendor/github.com/DataDog/datadog-agent/comp/core/log/component.go
new file mode 100644
index 0000000000..17cb1eab14
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/log/component.go
@@ -0,0 +1,59 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package log implements a component to handle logging internal to the agent.
+//
+// The component uses a number of values in BundleParams to decide how to
+// initialize itself, reading values from the comp/core/config component when
+// necessary. At present, it configures and wraps the global logger in
+// pkg/util/log, but will eventually be self-sufficient.
+//
+// The mock component does not read any configuration values, and redirects
+// logging output to `t.Log(..)`, for ease of investigation when a test fails.
+package log
+
+// team: agent-shared-components
+
+// Component is the component type.
+type Component interface {
+ // Trace logs the given arguments, separated by spaces, at the trace level
+ Trace(v ...interface{})
+ // Tracef logs the given formatted arguments at the trace level
+ Tracef(format string, params ...interface{})
+
+ // Debug logs the given arguments, separated by spaces, at the debug level
+ Debug(v ...interface{})
+ // Debugf logs the given formatted arguments at the debug level
+ Debugf(format string, params ...interface{})
+
+ // Info logs the given arguments, separated by spaces, at the info level
+ Info(v ...interface{})
+ // Infof logs the given formatted arguments at the info level
+ Infof(format string, params ...interface{})
+
+ // Warn logs the given arguments, separated by spaces, at the warn level,
+ // and returns an error containing the messages.
+ Warn(v ...interface{}) error
+ // Warnf logs the given formatted arguments at the warn level, and returns
+ // an error containing the message.
+ Warnf(format string, params ...interface{}) error
+
+ // Error logs the given arguments, separated by spaces, at the error level,
+ // and returns an error containing the messages.
+ Error(v ...interface{}) error
+ // Errorf logs the given formatted arguments at the error level, and returns
+ // an error containing the message.
+ Errorf(format string, params ...interface{}) error
+
+ // Critical logs the given arguments, separated by spaces, at the critical level,
+ // and returns an error containing the messages.
+ Critical(v ...interface{}) error
+ // Criticalf logs the given formatted arguments at the critical level, and returns
+ // an error containing the message.
+ Criticalf(format string, params ...interface{}) error
+
+ // Flush will flush the contents of the logs to the sinks
+ Flush()
+}
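
The distinctive convention in this interface is that Warn, Error and Critical both log the message and return it as an error, so callers can propagate it. A minimal sketch of that convention with a hypothetical warnAndReturn helper (not the component's implementation):

    package main

    import (
        "errors"
        "fmt"
        "log"
    )

    // warnAndReturn mimics the Warn/Error/Critical convention above: the message
    // is logged and also returned as an error for the caller to propagate.
    func warnAndReturn(v ...interface{}) error {
        msg := fmt.Sprint(v...)
        log.Println("WARN:", msg)
        return errors.New(msg)
    }

    func main() {
        if err := warnAndReturn("disk usage above ", 90, "%"); err != nil {
            fmt.Println("propagated:", err)
        }
    }
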
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/log/component_mock.go b/vendor/github.com/DataDog/datadog-agent/comp/core/log/component_mock.go
new file mode 100644
index 0000000000..afdf54ce4e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/log/component_mock.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package log
+
+// Mock is the mocked component type.
+type Mock interface {
+ Component
+
+ // no further methods are defined.
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/component.go b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/component.go
new file mode 100644
index 0000000000..df8b6b7e0d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/component.go
@@ -0,0 +1,40 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package secrets decodes secret values by invoking the configured executable command
+package secrets
+
+import (
+ "io"
+)
+
+// team: agent-shared-components
+
+// ConfigParams holds parameters for configuration
+type ConfigParams struct {
+ Command string
+ Arguments []string
+ Timeout int
+ MaxSize int
+ RefreshInterval int
+ GroupExecPerm bool
+ RemoveLinebreak bool
+ RunPath string
+ AuditFileMaxSize int
+}
+
+// Component is the component type.
+type Component interface {
+ // Configure the executable command that is used for decoding secrets
+ Configure(config ConfigParams)
+ // Get debug information and write it to the parameter
+ GetDebugInfo(w io.Writer)
+ // Resolve resolves the secrets in the given yaml data by replacing secret handles with their corresponding secret values
+ Resolve(data []byte, origin string) ([]byte, error)
+ // SubscribeToChanges registers a callback to be invoked whenever secrets are resolved or refreshed
+ SubscribeToChanges(callback SecretChangeCallback)
+ // Refresh will resolve secret handles again, notifying any subscribers of changed values
+ Refresh() (string, error)
+}
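
Conceptually, Resolve walks the configuration data and substitutes every ENC[handle] occurrence with the value produced by the configured backend command. A toy, self-contained sketch of that substitution step follows; toyResolve and the fixed value map are hypothetical, whereas the real component shells out to the backend command and tracks origins.

    package main

    import (
        "fmt"
        "regexp"
    )

    // toyResolve replaces every ENC[handle] in the config data with the value
    // looked up for that handle (here a fixed map instead of a backend command).
    func toyResolve(data []byte, values map[string]string) []byte {
        re := regexp.MustCompile(`ENC\[([^\]]+)\]`)
        return re.ReplaceAllFunc(data, func(m []byte) []byte {
            handle := re.FindSubmatch(m)[1]
            if v, ok := values[string(handle)]; ok {
                return []byte(v)
            }
            return m // leave unresolved handles untouched in this sketch
        })
    }

    func main() {
        cfg := []byte("api_key: ENC[api_key]\nsite: datadoghq.com\n")
        out := toyResolve(cfg, map[string]string{"api_key": "s3cr3t"})
        fmt.Print(string(out))
    }
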
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/component_mock.go b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/component_mock.go
new file mode 100644
index 0000000000..18f2771285
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/component_mock.go
@@ -0,0 +1,41 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package secrets
+
+// Mock implements mock-specific methods for the secrets component.
+//
+// Usage:
+//
+// fxutil.Test[dependencies](
+// t,
+// resources.MockModule,
+// fx.Replace(resources.MockParams{Data: someData}),
+// )
+type Mock interface {
+ Component
+
+ // SetFetchHookFunc allows the caller to overwrite the function that resolves secrets (and exec the secret
+ // binary).
+ //
+ // The mock function passed as a parameter will receive a list of handles to resolve and should return a map with the
+ // resolved value for each.
+ //
+ // Example:
+ // a call like: fetchHookFunc([]{"a", "b", "c"})
+ //
+ // needs to return:
+ // map[string]string{
+ // "a": "resolved_value_for_a",
+ // "b": "resolved_value_for_b",
+ // "c": "resolved_value_for_c",
+ // }
+ SetFetchHookFunc(func([]string) (map[string]string, error))
+
+ // SetBackendCommand sets the backend command for resolving secrets
+ SetBackendCommand(command string)
+}
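
A fetch hook matching the contract documented above would look roughly like this; fetchHook and its fabricated values are illustrative only.

    package main

    import "fmt"

    // fetchHook has the shape expected by SetFetchHookFunc: it receives the
    // handles to resolve and returns a resolved value for each one.
    func fetchHook(handles []string) (map[string]string, error) {
        out := make(map[string]string, len(handles))
        for _, h := range handles {
            out[h] = "resolved_value_for_" + h
        }
        return out, nil
    }

    func main() {
        resolved, _ := fetchHook([]string{"a", "b", "c"})
        fmt.Println(resolved)
    }
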
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/docs.go b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/docs.go
new file mode 100644
index 0000000000..9f3969017e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/docs.go
@@ -0,0 +1,7 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2018-present Datadog, Inc.
+
+// Package secrets implements the secret feature of the agent
+package secrets
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/params.go b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/params.go
new file mode 100644
index 0000000000..94da25fd57
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/params.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2018-present Datadog, Inc.
+
+package secrets
+
+// Params contains parameters for secrets, specifically whether the component is enabled
+type Params struct {
+ Enabled bool
+}
+
+// NewEnabledParams constructs params for an enabled component
+func NewEnabledParams() Params {
+ return Params{
+ Enabled: true,
+ }
+}
+
+// NewDisabledParams constructs params for a disabled component
+func NewDisabledParams() Params {
+ return Params{
+ Enabled: false,
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/type.go b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/type.go
new file mode 100644
index 0000000000..c308488512
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/secrets/type.go
@@ -0,0 +1,24 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2018-present Datadog, Inc.
+
+package secrets
+
+// SecretVal defines the structure for secrets in JSON output
+type SecretVal struct {
+ Value string `json:"value,omitempty"`
+ ErrorMsg string `json:"error,omitempty"`
+}
+
+// SecretChangeCallback is the callback type used by SubscribeToChanges to send notifications
+// This callback will be called once for each time a handle at a particular path is resolved or refreshed
+// `handle`: the handle of the secret (example: `ENC[api_key]` the handle is `api_key`)
+// `origin`: origin file of the configuration
+// `path`: a path into the config file where the secret appears, each part is a level of nesting, arrays will use stringified indexes
+// `oldValue`: the value that the secret used to have, or the empty string "" if it hasn't been resolved before
+// `newValue`: the new value that the secret has resolved to
+type SecretChangeCallback func(handle, origin string, path []string, oldValue, newValue any)
+
+// PayloadVersion defines the current payload version sent to a secret backend
+const PayloadVersion = "1.0"
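
A subscriber passed to SubscribeToChanges would typically just record or act on the change. A minimal sketch of a callback with the documented signature (logChange is a hypothetical example, not part of the package):

    package main

    import (
        "fmt"
        "strings"
    )

    // logChange has the SecretChangeCallback shape described above: it is invoked
    // with the handle, the origin file, the path into the config, and the old and
    // new values each time a secret is resolved or refreshed.
    func logChange(handle, origin string, path []string, oldValue, newValue any) {
        fmt.Printf("secret %q (from %s) changed at %s: %v -> %v\n",
            handle, origin, strings.Join(path, "."), oldValue, newValue)
    }

    func main() {
        logChange("api_key", "datadog.yaml", []string{"api_key"}, "", "s3cr3t")
    }
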
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/collector.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/collector.go
new file mode 100644
index 0000000000..d211a07cea
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/collector.go
@@ -0,0 +1,13 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build !serverless
+
+package telemetry
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Collector is an alias to prometheus.Collector
+type Collector = prometheus.Collector
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/collector_noop.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/collector_noop.go
new file mode 100644
index 0000000000..ce639d9a0c
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/collector_noop.go
@@ -0,0 +1,10 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build serverless
+
+package telemetry
+
+// Collector is a no-op stand-in for prometheus.Collector in serverless builds.
+type Collector interface{}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/component.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/component.go
new file mode 100644
index 0000000000..cc56e6ce11
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/component.go
@@ -0,0 +1,59 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+// Package telemetry implements a component for all agent telemetry.
+package telemetry
+
+import (
+ "net/http"
+)
+
+// team: agent-shared-components
+
+// Component is the component type.
+type Component interface {
+ // Handler returns an http handler to expose the internal metrics
+ Handler() http.Handler
+ // Reset resets all tracked telemetry
+ Reset()
+ // RegisterCollector registers a Collector with the prometheus registry
+ RegisterCollector(c Collector)
+ // UnregisterCollector unregisters a Collector with the prometheus registry
+ UnregisterCollector(c Collector) bool
+ // Meter returns a new OTEL meter
+ Meter(name string, opts ...MeterOption) Meter
+ // NewCounter creates a Counter with default options for telemetry purposes.
+ NewCounter(subsystem, name string, tags []string, help string) Counter
+ // NewCounterWithOpts creates a Counter with the given options for telemetry purposes.
+ NewCounterWithOpts(subsystem, name string, tags []string, help string, opts Options) Counter
+
+ // NewSimpleCounter creates a new SimpleCounter with default options.
+ NewSimpleCounter(subsystem, name, help string) SimpleCounter
+ // NewSimpleCounterWithOpts creates a new SimpleCounter.
+ NewSimpleCounterWithOpts(subsystem, name, help string, opts Options) SimpleCounter
+
+ // NewGauge creates a Gauge with default options for telemetry purposes.
+ NewGauge(subsystem, name string, tags []string, help string) Gauge
+ // NewGaugeWithOpts creates a Gauge with the given options for telemetry purposes.
+ NewGaugeWithOpts(subsystem, name string, tags []string, help string, opts Options) Gauge
+
+ // NewSimpleGauge creates a new SimpleGauge with default options.
+ NewSimpleGauge(subsystem, name, help string) SimpleGauge
+ // NewSimpleGaugeWithOpts creates a new SimpleGauge.
+ NewSimpleGaugeWithOpts(subsystem, name, help string, opts Options) SimpleGauge
+
+ // NewHistogram creates a Histogram with default options for telemetry purposes.
+ NewHistogram(subsystem, name string, tags []string, help string, buckets []float64) Histogram
+ // NewHistogramWithOpts creates a Histogram with the given options for telemetry purposes.
+ NewHistogramWithOpts(subsystem, name string, tags []string, help string, buckets []float64, opts Options) Histogram
+
+ // NewSimpleHistogram creates a new SimpleHistogram with default options.
+ NewSimpleHistogram(subsystem, name, help string, buckets []float64) SimpleHistogram
+ // NewSimpleHistogramWithOpts creates a new SimpleHistogram.
+ NewSimpleHistogramWithOpts(subsystem, name, help string, buckets []float64, opts Options) SimpleHistogram
+
+ // Gather exposes metrics from the general or default telemetry registry (see options.DefaultMetric)
+ Gather(defaultGather bool) ([]*MetricFamily, error)
+}
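+
+// Example (illustrative sketch; the subsystem, metric, and tag names are
+// hypothetical): creating and updating a tagged counter and a gauge through the
+// Component interface defined above.
+func exampleComponentUsage(t Component) {
+ requests := t.NewCounter("example_server", "requests_total", []string{"status"}, "Number of handled requests")
+ inflight := t.NewGauge("example_server", "inflight_requests", nil, "Currently in-flight requests")
+
+ inflight.Inc()
+ requests.Inc("ok")
+ inflight.Dec()
+}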
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/component_mock.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/component_mock.go
new file mode 100644
index 0000000000..8907b06573
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/component_mock.go
@@ -0,0 +1,21 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build !serverless && test
+
+package telemetry
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ sdk "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// Mock implements mock-specific methods.
+type Mock interface {
+ Component
+
+ GetRegistry() *prometheus.Registry
+ GetMeterProvider() *sdk.MeterProvider
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/counter.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/counter.go
new file mode 100644
index 0000000000..186aa32733
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/counter.go
@@ -0,0 +1,36 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+// Counter tracks how many times something is happening.
+type Counter interface {
+ // InitializeToZero creates the counter with the given tags and initializes it to 0.
+ // This method is intended to be used when the counter value is important to
+ // send even before any incrementing/addition is done on it.
+ InitializeToZero(tagsValue ...string)
+ // Inc increments the counter with the given tags value.
+ Inc(tagsValue ...string)
+ // Add adds the given value to the counter with the given tags value.
+ Add(value float64, tagsValue ...string)
+ // Delete deletes the value for the counter with the given tags value.
+ Delete(tagsValue ...string)
+ // IncWithTags increments the counter with the given tags.
+ // Even if less convenient, this signature could be used in hot path
+ // instead of Inc(...string) to avoid escaping the parameters on the heap.
+ IncWithTags(tags map[string]string)
+ // AddWithTags adds the given value to the counter with the given tags.
+ // Even if less convenient, this signature could be used in hot path
+ // instead of Add(float64, ...string) to avoid escaping the parameters on the heap.
+ AddWithTags(value float64, tags map[string]string)
+ // DeleteWithTags deletes the value for the counter with the given tags.
+ // Even if less convenient, this signature could be used in hot path
+ // instead of Delete(...string) to avoid escaping the parameters on the heap.
+ DeleteWithTags(tags map[string]string)
+ // WithValues returns SimpleCounter for this metric with the given tag values.
+ WithValues(tagsValue ...string) SimpleCounter
+ // WithTags returns SimpleCounter for this metric with the given tag values.
+ WithTags(tags map[string]string) SimpleCounter
+}
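+
+// Example (illustrative sketch; the metric and loop are hypothetical): resolving
+// the tag values once with WithValues keeps a hot loop free of per-call variadic
+// tag slices, in the spirit of the hot-path notes above.
+func exampleHotPathCounter(c Counter) {
+ ok := c.WithValues("ok")
+ for i := 0; i < 1000; i++ {
+ ok.Inc() // SimpleCounter.Inc takes no arguments, so no tag slice is built per call
+ }
+}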
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/gauge.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/gauge.go
new file mode 100644
index 0000000000..b58ef14aac
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/gauge.go
@@ -0,0 +1,26 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+// Gauge tracks the value of one health metric of the Agent.
+type Gauge interface {
+ // Set stores the value for the given tags.
+ Set(value float64, tagsValue ...string)
+ // Inc increments the Gauge value.
+ Inc(tagsValue ...string)
+ // Dec decrements the Gauge value.
+ Dec(tagsValue ...string)
+ // Add adds the value to the Gauge value.
+ Add(value float64, tagsValue ...string)
+ // Sub subtracts the value from the Gauge value.
+ Sub(value float64, tagsValue ...string)
+ // Delete deletes the value for the Gauge with the given tags.
+ Delete(tagsValue ...string)
+ // WithValues returns SimpleGauge for this metric with the given tag values.
+ WithValues(tagsValue ...string) SimpleGauge
+ // WithTags returns SimpleGauge for this metric with the given tag values.
+ WithTags(tags map[string]string) SimpleGauge
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/histogram.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/histogram.go
new file mode 100644
index 0000000000..a98ef24ba8
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/histogram.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+// Histogram tracks the distribution of values for one health metric of the Agent.
+type Histogram interface {
+ // Observe the value to the Histogram value.
+ Observe(value float64, tagsValue ...string)
+ // Delete deletes the value for the Histogram with the given tags.
+ Delete(tagsValue ...string)
+ // WithValues returns SimpleHistogram for this metric with the given tag values.
+ WithValues(tagsValue ...string) SimpleHistogram
+ // WithTags returns SimpleHistogram for this metric with the given tag values.
+ WithTags(tags map[string]string) SimpleHistogram
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/metric.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/metric.go
new file mode 100644
index 0000000000..e8360ecead
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/metric.go
@@ -0,0 +1,22 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build !serverless
+
+package telemetry
+
+import (
+ dto "github.com/prometheus/client_model/go"
+ "go.opentelemetry.io/otel/metric"
+)
+
+// MeterOption is an alias to metric.MeterOption
+type MeterOption = metric.MeterOption
+
+// Meter is an alias to metric.Meter
+type Meter = metric.Meter
+
+// MetricFamily is an alias to dto.MetricFamily
+type MetricFamily = dto.MetricFamily
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/metric_noop.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/metric_noop.go
new file mode 100644
index 0000000000..8b008a784e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/metric_noop.go
@@ -0,0 +1,16 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build serverless
+
+package telemetry
+
+// MeterOption is a no-op stand-in for metric.MeterOption in serverless builds.
+type MeterOption interface {
+}
+
+// Meter is a no-op stand-in for metric.Meter in serverless builds.
+type Meter interface {
+}
+
+// MetricFamily is a no-op stand-in for dto.MetricFamily in serverless builds.
+type MetricFamily struct{}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/counter.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/counter.go
new file mode 100644
index 0000000000..4175bf9fbc
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/counter.go
@@ -0,0 +1,62 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package noopsimpl
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+)
+
+// slsCounter is a no-op implementation of telemetry.Counter.
+type slsCounter struct{}
+
+// InitializeToZero creates the counter with the given tags and initializes it to 0.
+// This method is intended to be used when the counter value is important to
+// send even before any incrementing/addition is done on it.
+func (c *slsCounter) InitializeToZero(...string) {
+ // no-op: there is nothing to initialize.
+}
+
+// Add adds the given value to the counter with the given tags value.
+//
+// If the value is < 0, no add takes place, as the counter is monotonic.
+// The prometheus client would panic in such a case.
+func (c *slsCounter) Add(float64, ...string) {}
+
+// AddWithTags adds the given value to the counter with the given tags.
+// Even if less convenient, this signature could be used in hot path
+// instead of Add(float64, ...string) to avoid escaping the parameters on the heap.
+//
+// If the value is < 0, no add takes place, as the counter is monotonic.
+// The prometheus client would panic in such a case.
+func (c *slsCounter) AddWithTags(float64, map[string]string) {}
+
+// Inc increments the counter with the given tags value.
+func (c *slsCounter) Inc(...string) {}
+
+// IncWithTags increments the counter with the given tags.
+// Even if less convenient, this signature could be used in hot path
+// instead of Inc(...string) to avoid escaping the parameters on the heap.
+func (c *slsCounter) IncWithTags(map[string]string) {}
+
+// Delete deletes the value for the counter with the given tags value.
+func (c *slsCounter) Delete(...string) {}
+
+// DeleteWithTags deletes the value for the counter with the given tags.
+// Even if less convenient, this signature could be used in hot path
+// instead of Delete(...string) to avoid escaping the parameters on the heap.
+func (c *slsCounter) DeleteWithTags(map[string]string) {}
+
+// WithValues returns SimpleCounter for this metric with the given tag values.
+func (c *slsCounter) WithValues(...string) telemetry.SimpleCounter {
+ return &simpleNoOpCounter{}
+}
+
+// WithTags returns SimpleCounter for this metric with the given tags.
+func (c *slsCounter) WithTags(map[string]string) telemetry.SimpleCounter {
+ return &simpleNoOpCounter{}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/gauge.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/gauge.go
new file mode 100644
index 0000000000..7dfa03e4f8
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/gauge.go
@@ -0,0 +1,39 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package noopsimpl
+
+import "github.com/DataDog/datadog-agent/comp/core/telemetry"
+
+// slsGauge is a no-op implementation of telemetry.Gauge.
+type slsGauge struct{}
+
+// Set stores the value for the given tags.
+func (g *slsGauge) Set(float64, ...string) {}
+
+// Inc increments the Gauge value.
+func (g *slsGauge) Inc(...string) {}
+
+// Dec decrements the Gauge value.
+func (g *slsGauge) Dec(...string) {}
+
+// Delete deletes the value for the Gauge with the given tags.
+func (g *slsGauge) Delete(...string) {}
+
+// Add adds the value to the Gauge value.
+func (g *slsGauge) Add(float64, ...string) {}
+
+// Sub subtracts the value to the Gauge value.
+func (g *slsGauge) Sub(float64, ...string) {}
+
+// WithValues returns SimpleGauge for this metric with the given tag values.
+func (g *slsGauge) WithValues(...string) telemetry.SimpleGauge {
+ return &simpleNoOpGauge{}
+}
+
+// WithTags returns SimpleGauge for this metric with the given tags.
+func (g *slsGauge) WithTags(map[string]string) telemetry.SimpleGauge {
+ return &simpleNoOpGauge{}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/histogram.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/histogram.go
new file mode 100644
index 0000000000..c875ed3a69
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/histogram.go
@@ -0,0 +1,29 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package noopsimpl
+
+import "github.com/DataDog/datadog-agent/comp/core/telemetry"
+
+// slsHistogram is a no-op implementation of telemetry.Histogram.
+type slsHistogram struct{}
+
+// Observe samples the value for the given tags.
+func (h *slsHistogram) Observe(float64, ...string) {}
+
+// Delete deletes the value for the Histogram with the given tags.
+func (h *slsHistogram) Delete(...string) {}
+
+// WithValues returns SimpleHistogram for this metric with the given tag values.
+func (h *slsHistogram) WithValues(...string) telemetry.SimpleHistogram {
+ return &simpleNoOpHistogram{}
+}
+
+// WithTags returns SimpleHistogram for this metric with the given tags.
+func (h *slsHistogram) WithTags(map[string]string) telemetry.SimpleHistogram {
+ return &simpleNoOpHistogram{}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/simple_noop_counter.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/simple_noop_counter.go
new file mode 100644
index 0000000000..a127b10197
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/simple_noop_counter.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package noopsimpl
+
+type simpleNoOpCounter struct{}
+
+// Inc increments the counter.
+func (s *simpleNoOpCounter) Inc() {}
+
+// Add increments the counter by given amount.
+func (s *simpleNoOpCounter) Add(float64) {}
+
+// Get gets the current counter value
+func (s *simpleNoOpCounter) Get() float64 {
+ return 0
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/simple_noop_gauge.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/simple_noop_gauge.go
new file mode 100644
index 0000000000..cc2bd446ae
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/simple_noop_gauge.go
@@ -0,0 +1,28 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package noopsimpl
+
+type simpleNoOpGauge struct{}
+
+// Inc increments the gauge.
+func (s *simpleNoOpGauge) Inc() {}
+
+// Dec decrements the gauge.
+func (s *simpleNoOpGauge) Dec() {}
+
+// Add increments the gauge by given amount.
+func (s *simpleNoOpGauge) Add(float64) {}
+
+// Sub decrements the gauge by given amount.
+func (s *simpleNoOpGauge) Sub(float64) {}
+
+// Set sets the value of the gauge.
+func (s *simpleNoOpGauge) Set(float64) {}
+
+// Get gets the value of the gauge.
+func (s *simpleNoOpGauge) Get() float64 {
+ return 0
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/simple_noop_histogram.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/simple_noop_histogram.go
new file mode 100644
index 0000000000..0e6632a8ad
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/simple_noop_histogram.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package noopsimpl
+
+import "github.com/DataDog/datadog-agent/comp/core/telemetry"
+
+// simpleNoOpHistogram is a no-op implementation of telemetry.SimpleHistogram.
+type simpleNoOpHistogram struct{}
+
+// Observe the value to the Histogram value.
+func (s *simpleNoOpHistogram) Observe(float64) {}
+
+func (s *simpleNoOpHistogram) Get() telemetry.HistogramValue {
+ return telemetry.HistogramValue{}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/telemetry.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/telemetry.go
new file mode 100644
index 0000000000..42a3584452
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl/telemetry.go
@@ -0,0 +1,115 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+// Package noopsimpl creates the noop telemetry component
+package noopsimpl
+
+import (
+ "net/http"
+
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+ "go.uber.org/fx"
+)
+
+type noopImpl struct{}
+
+func newTelemetry() telemetry.Component {
+ return &noopImpl{}
+}
+
+type dummy struct{}
+
+func (d *dummy) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("Telemetry is not enabled"))
+}
+
+var dummyHandler = dummy{}
+
+func (t *noopImpl) Handler() http.Handler {
+ return &dummyHandler
+}
+
+func (t *noopImpl) Reset() {
+}
+
+func (t *noopImpl) NewCounter(subsystem, name string, tags []string, help string) telemetry.Counter {
+ return t.NewCounterWithOpts(subsystem, name, tags, help, telemetry.DefaultOptions)
+}
+
+func (t *noopImpl) NewCounterWithOpts(_, _ string, _ []string, _ string, _ telemetry.Options) telemetry.Counter {
+ return &slsCounter{}
+
+}
+
+func (t *noopImpl) NewSimpleCounter(subsystem, name, help string) telemetry.SimpleCounter {
+ return t.NewSimpleCounterWithOpts(subsystem, name, help, telemetry.DefaultOptions)
+}
+
+func (t *noopImpl) NewSimpleCounterWithOpts(_, _, _ string, _ telemetry.Options) telemetry.SimpleCounter {
+ return &simpleNoOpCounter{}
+
+}
+
+func (t *noopImpl) NewGauge(subsystem, name string, tags []string, help string) telemetry.Gauge {
+ return t.NewGaugeWithOpts(subsystem, name, tags, help, telemetry.DefaultOptions)
+}
+
+func (t *noopImpl) NewGaugeWithOpts(_, _ string, _ []string, _ string, _ telemetry.Options) telemetry.Gauge {
+ return &slsGauge{}
+
+}
+
+func (t *noopImpl) NewSimpleGauge(subsystem, name, help string) telemetry.SimpleGauge {
+ return t.NewSimpleGaugeWithOpts(subsystem, name, help, telemetry.DefaultOptions)
+}
+
+func (t *noopImpl) NewSimpleGaugeWithOpts(_, _, _ string, _ telemetry.Options) telemetry.SimpleGauge {
+ return &simpleNoOpGauge{}
+
+}
+
+func (t *noopImpl) NewHistogram(subsystem, name string, tags []string, help string, buckets []float64) telemetry.Histogram {
+ return t.NewHistogramWithOpts(subsystem, name, tags, help, buckets, telemetry.DefaultOptions)
+}
+
+func (t *noopImpl) NewHistogramWithOpts(_, _ string, _ []string, _ string, _ []float64, _ telemetry.Options) telemetry.Histogram {
+ return &slsHistogram{}
+}
+
+func (t *noopImpl) NewSimpleHistogram(subsystem, name, help string, buckets []float64) telemetry.SimpleHistogram {
+ return t.NewSimpleHistogramWithOpts(subsystem, name, help, buckets, telemetry.DefaultOptions)
+}
+
+func (t *noopImpl) NewSimpleHistogramWithOpts(_, _, _ string, _ []float64, _ telemetry.Options) telemetry.SimpleHistogram {
+ return &simpleNoOpHistogram{}
+}
+
+func (t *noopImpl) Meter(_ string, _ ...telemetry.MeterOption) telemetry.Meter {
+ return nil
+}
+
+func (t *noopImpl) RegisterCollector(telemetry.Collector) {}
+
+func (t *noopImpl) UnregisterCollector(telemetry.Collector) bool {
+ return true
+}
+
+func (t *noopImpl) Gather(bool) ([]*telemetry.MetricFamily, error) {
+ return nil, nil
+}
+
+// GetCompatComponent returns a component wrapping telemetry global variables
+// TODO (components): Remove this when all telemetry is migrated to the component
+func GetCompatComponent() telemetry.Component {
+ return newTelemetry()
+}
+
+// Module defines the fx options for this component.
+func Module() fxutil.Module {
+ return fxutil.Component(
+ fx.Provide(newTelemetry))
+}
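+
+// Example (illustrative sketch; the metric name is hypothetical): the no-op
+// component can stand in wherever a telemetry.Component is required but no
+// metrics should actually be collected; every returned metric is inert.
+func exampleNoopUsage() telemetry.Counter {
+ t := GetCompatComponent()
+ return t.NewCounter("example", "events_total", []string{"status"}, "Number of processed events")
+}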
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/options.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/options.go
new file mode 100644
index 0000000000..8aee6e15f6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/options.go
@@ -0,0 +1,40 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+import "fmt"
+
+// Options for telemetry metrics.
+// Creating an Options struct without specifying any of its fields should be the
+// equivalent of using the DefaultOptions var.
+type Options struct {
+ // NoDoubleUnderscoreSep is set to true when you don't want to
+ // separate the subsystem and the name with a double underscore separator.
+ NoDoubleUnderscoreSep bool
+
+ // DefaultMetric exports metric by default via built-in agent_telemetry core check.
+ DefaultMetric bool
+}
+
+// DefaultOptions for telemetry metrics which don't need to specify any option.
+var DefaultOptions = Options{
+ // By default, we want to separate the subsystem and the metric name with a
+ // double underscore to be able to replace it later in the process.
+ NoDoubleUnderscoreSep: false,
+}
+
+// NameWithSeparator returns the name prefixed according to NoDoubleUnderscoreSep.
+func (opts *Options) NameWithSeparator(subsystem, name string) string {
+ // subsystem is optional
+ if subsystem != "" && !opts.NoDoubleUnderscoreSep {
+ // Prefix the name with a _, prometheus will add a second _.
+ // This creates metrics with a custom separator and
+ // lets us replace it with a dot later in the process.
+ return fmt.Sprintf("_%s", name)
+ }
+
+ return name
+}
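+
+// Example (illustrative sketch; "bar" and "foo" are hypothetical names): with
+// DefaultOptions a metric "foo" in subsystem "bar" is renamed "_foo", so the
+// registered prometheus name becomes "bar__foo" and the double underscore can be
+// replaced later; with NoDoubleUnderscoreSep the name is kept as-is.
+func exampleNameWithSeparator() (string, string) {
+ def := DefaultOptions
+ noSep := Options{NoDoubleUnderscoreSep: true}
+ return def.NameWithSeparator("bar", "foo"), noSep.NameWithSeparator("bar", "foo") // "_foo", "foo"
+}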
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/simple_counter.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/simple_counter.go
new file mode 100644
index 0000000000..2e64868d55
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/simple_counter.go
@@ -0,0 +1,16 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+// SimpleCounter tracks how many times something is happening.
+type SimpleCounter interface {
+ // Inc increments the counter.
+ Inc()
+ // Add increments the counter by given amount.
+ Add(float64)
+ // Get gets the current counter value
+ Get() float64
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/simple_gauge.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/simple_gauge.go
new file mode 100644
index 0000000000..bdacc48e21
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/simple_gauge.go
@@ -0,0 +1,22 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+// SimpleGauge tracks the value of one health metric of the Agent.
+type SimpleGauge interface {
+ // Inc increments the gauge.
+ Inc()
+ // Dec decrements the gauge.
+ Dec()
+ // Add increments the gauge by given amount.
+ Add(float64)
+ // Sub decrements the gauge by given amount.
+ Sub(float64)
+ // Set sets the value of the gauge.
+ Set(float64)
+ // Get gets the value of the gauge.
+ Get() float64
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/simple_histogram.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/simple_histogram.go
new file mode 100644
index 0000000000..3c9c53707e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/simple_histogram.go
@@ -0,0 +1,28 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+// HistogramValue is a struct representing the internal histogram state
+type HistogramValue struct {
+ Count uint64
+ Sum float64
+ Buckets []Bucket
+}
+
+// Bucket is a struct representing the internal bucket state
+type Bucket struct {
+ UpperBound float64
+ Count uint64
+}
+
+// SimpleHistogram tracks the distribution of observed values for one metric of the Agent.
+type SimpleHistogram interface {
+ // Observe the value to the Histogram value.
+ Observe(value float64)
+
+ // Get gets the current histogram values
+ Get() HistogramValue
+}
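+
+// Example (illustrative sketch): reading the cumulative state back out of a
+// SimpleHistogram and computing the mean of the observed values.
+func exampleHistogramMean(h SimpleHistogram) float64 {
+ v := h.Get()
+ if v.Count == 0 {
+ return 0
+ }
+ // Sum and Count are cumulative over every Observe call.
+ return v.Sum / float64(v.Count)
+}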
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/prom_counter.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/prom_counter.go
new file mode 100644
index 0000000000..e8f6fafb21
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/prom_counter.go
@@ -0,0 +1,82 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetryimpl
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Counter implementation using Prometheus.
+type promCounter struct {
+ pc *prometheus.CounterVec
+}
+
+// InitializeToZero creates the counter with the given tags and initializes it to 0.
+// This method is intended to be used when the counter value is important to
+// send even before any incrementing/addition is done on it.
+func (c *promCounter) InitializeToZero(tagsValue ...string) {
+ // By requesting a counter for a set of tags, we are creating and initializing
+ // the counter at 0. See the following for more info:
+ // https://github.com/prometheus/client_golang/blob/v1.9.0/prometheus/counter.go#L194-L196
+ c.pc.WithLabelValues(tagsValue...)
+}
+
+// Add adds the given value to the counter with the given tags value.
+//
+// If the value is < 0, no add takes place, as the counter is monotonic.
+// The prometheus client would panic in such a case.
+func (c *promCounter) Add(value float64, tagsValue ...string) {
+ if value > 0 {
+ c.pc.WithLabelValues(tagsValue...).Add(value)
+ }
+}
+
+// AddWithTags adds the given value to the counter with the given tags.
+// Even if less convenient, this signature could be used in hot path
+// instead of Add(float64, ...string) to avoid escaping the parameters on the heap.
+//
+// If the value is < 0, no add takes place, as the counter is monotonic.
+// The prometheus client would panic in such a case.
+func (c *promCounter) AddWithTags(value float64, tags map[string]string) {
+ if value > 0 {
+ c.pc.With(tags).Add(value)
+ }
+}
+
+// Inc increments the counter with the given tags value.
+func (c *promCounter) Inc(tagsValue ...string) {
+ c.pc.WithLabelValues(tagsValue...).Inc()
+}
+
+// IncWithTags increments the counter with the given tags.
+// Even if less convenient, this signature could be used in hot path
+// instead of Inc(...string) to avoid escaping the parameters on the heap.
+func (c *promCounter) IncWithTags(tags map[string]string) {
+ c.pc.With(tags).Inc()
+}
+
+// Delete deletes the value for the counter with the given tags value.
+func (c *promCounter) Delete(tagsValue ...string) {
+ c.pc.DeleteLabelValues(tagsValue...)
+}
+
+// DeleteWithTags deletes the value for the counter with the given tags.
+// Even if less convenient, this signature could be used in hot path
+// instead of Delete(...string) to avoid escaping the parameters on the heap.
+func (c *promCounter) DeleteWithTags(tags map[string]string) {
+ c.pc.Delete(tags)
+}
+
+// WithValues returns SimpleCounter for this metric with the given tag values.
+func (c *promCounter) WithValues(tagsValue ...string) telemetry.SimpleCounter {
+ return &simplePromCounter{c: c.pc.WithLabelValues(tagsValue...)}
+}
+
+// WithTags returns SimpleCounter for this metric with the given tags.
+func (c *promCounter) WithTags(tags map[string]string) telemetry.SimpleCounter {
+ return &simplePromCounter{c: c.pc.With(tags)}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/prom_gauge.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/prom_gauge.go
new file mode 100644
index 0000000000..bf66fefc93
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/prom_gauge.go
@@ -0,0 +1,56 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetryimpl
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Gauge implementation using Prometheus.
+type promGauge struct {
+ pg *prometheus.GaugeVec
+}
+
+// Set stores the value for the given tags.
+func (g *promGauge) Set(value float64, tagsValue ...string) {
+ g.pg.WithLabelValues(tagsValue...).Set(value)
+}
+
+// Inc increments the Gauge value.
+func (g *promGauge) Inc(tagsValue ...string) {
+ g.pg.WithLabelValues(tagsValue...).Inc()
+}
+
+// Dec decrements the Gauge value.
+func (g *promGauge) Dec(tagsValue ...string) {
+ g.pg.WithLabelValues(tagsValue...).Dec()
+}
+
+// Delete deletes the value for the Gauge with the given tags.
+func (g *promGauge) Delete(tagsValue ...string) {
+ g.pg.DeleteLabelValues(tagsValue...)
+}
+
+// Add adds the value to the Gauge value.
+func (g *promGauge) Add(value float64, tagsValue ...string) {
+ g.pg.WithLabelValues(tagsValue...).Add(value)
+}
+
+// Sub subtracts the value from the Gauge value.
+func (g *promGauge) Sub(value float64, tagsValue ...string) {
+ g.pg.WithLabelValues(tagsValue...).Sub(value)
+}
+
+// WithValues returns SimpleGauge for this metric with the given tag values.
+func (g *promGauge) WithValues(tagsValue ...string) telemetry.SimpleGauge {
+ return &simplePromGauge{g: g.pg.WithLabelValues(tagsValue...)}
+}
+
+// WithTags returns SimpleGauge for this metric with the given tags.
+func (g *promGauge) WithTags(tags map[string]string) telemetry.SimpleGauge {
+ return &simplePromGauge{g: g.pg.With(tags)}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/prom_histogram.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/prom_histogram.go
new file mode 100644
index 0000000000..c90d8b04a7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/prom_histogram.go
@@ -0,0 +1,38 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetryimpl
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Histogram implementation using Prometheus.
+type promHistogram struct {
+ ph *prometheus.HistogramVec
+}
+
+// Observe samples the value for the given tags.
+func (h *promHistogram) Observe(value float64, tagsValue ...string) {
+ h.ph.WithLabelValues(tagsValue...).Observe(value)
+}
+
+// Delete deletes the value for the Histogram with the given tags.
+func (h *promHistogram) Delete(tagsValue ...string) {
+ h.ph.DeleteLabelValues(tagsValue...)
+}
+
+// WithValues returns SimpleHistogram for this metric with the given tag values.
+func (h *promHistogram) WithValues(tagsValue ...string) telemetry.SimpleHistogram {
+ // Prometheus does not directly expose the underlying histogram so we have to cast it.
+ return &simplePromHistogram{h: h.ph.WithLabelValues(tagsValue...).(prometheus.Histogram)}
+}
+
+// WithTags returns SimpleHistogram for this metric with the given tags.
+func (h *promHistogram) WithTags(tags map[string]string) telemetry.SimpleHistogram {
+ // Prometheus does not directly expose the underlying histogram so we have to cast it.
+ return &simplePromHistogram{h: h.ph.With(tags).(prometheus.Histogram)}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/simple_prom_counter.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/simple_prom_counter.go
new file mode 100644
index 0000000000..195520758f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/simple_prom_counter.go
@@ -0,0 +1,33 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetryimpl
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Counter implementation using Prometheus.
+type simplePromCounter struct {
+ c prometheus.Counter
+}
+
+// Inc increments the counter.
+func (s *simplePromCounter) Inc() {
+ s.c.Inc()
+}
+
+// Add increments the counter by given amount.
+func (s *simplePromCounter) Add(val float64) {
+ s.c.Add(val)
+}
+
+// Get gets the current counter value
+func (s *simplePromCounter) Get() float64 {
+ metric := &dto.Metric{}
+ _ = s.c.Write(metric)
+ return *metric.Counter.Value
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/simple_prom_gauge.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/simple_prom_gauge.go
new file mode 100644
index 0000000000..1f81ddbc86
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/simple_prom_gauge.go
@@ -0,0 +1,47 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetryimpl
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+type simplePromGauge struct {
+ g prometheus.Gauge
+}
+
+// Inc increments the gauge.
+func (s *simplePromGauge) Inc() {
+ s.g.Inc()
+}
+
+// Dec decrements the gauge.
+func (s *simplePromGauge) Dec() {
+ s.g.Dec()
+}
+
+// Add increments the gauge by given amount.
+func (s *simplePromGauge) Add(val float64) {
+ s.g.Add(val)
+}
+
+// Sub decrements the gauge by given amount.
+func (s *simplePromGauge) Sub(val float64) {
+ s.g.Sub(val)
+}
+
+// Set sets the value of the gauge.
+func (s *simplePromGauge) Set(val float64) {
+ s.g.Set(val)
+}
+
+// Get gets the value of the gauge.
+func (s *simplePromGauge) Get() float64 {
+ metric := &dto.Metric{}
+ _ = s.g.Write(metric)
+ return *metric.Gauge.Value
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/simple_prom_histogram.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/simple_prom_histogram.go
new file mode 100644
index 0000000000..4b488052b8
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/simple_prom_histogram.go
@@ -0,0 +1,41 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetryimpl
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+// SimpleHistogram implementation using Prometheus.
+type simplePromHistogram struct {
+ h prometheus.Histogram
+}
+
+// Observe the value to the Histogram value.
+func (s *simplePromHistogram) Observe(value float64) {
+ s.h.Observe(value)
+}
+
+func (s *simplePromHistogram) Get() telemetry.HistogramValue {
+ m := &dto.Metric{}
+ _ = s.h.Write(m)
+ hv := telemetry.HistogramValue{
+ Count: *m.Histogram.SampleCount,
+ Sum: *m.Histogram.SampleSum,
+ Buckets: make([]telemetry.Bucket, 0, len(m.Histogram.Bucket)),
+ }
+
+ for _, b := range m.Histogram.Bucket {
+ hv.Buckets = append(hv.Buckets, telemetry.Bucket{
+ UpperBound: *b.UpperBound,
+ Count: *b.CumulativeCount,
+ })
+
+ }
+ return hv
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/telemetry.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/telemetry.go
new file mode 100644
index 0000000000..8496c266f7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/telemetry.go
@@ -0,0 +1,259 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+// Package telemetryimpl implements the telemetry component interface.
+package telemetryimpl
+
+import (
+ "net/http"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+ promOtel "go.opentelemetry.io/otel/exporters/prometheus"
+ "go.uber.org/fx"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/collectors"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "go.opentelemetry.io/otel/metric"
+ sdk "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// Module defines the fx options for this component.
+func Module() fxutil.Module {
+ return fxutil.Component(
+ fx.Provide(newTelemetry))
+}
+
+// TODO (components): Remove the globals and move this into `newTelemetry` after all telemetry is migrated to the component
+var (
+ registry = newRegistry()
+ provider = newProvider(registry)
+ mutex = sync.Mutex{}
+
+ defaultRegistry = prometheus.NewRegistry()
+)
+
+type telemetryImpl struct {
+ mutex *sync.Mutex
+ registry *prometheus.Registry
+ meterProvider *sdk.MeterProvider
+
+ defaultRegistry *prometheus.Registry
+}
+
+func newRegistry() *prometheus.Registry {
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
+ reg.MustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsGC, collectors.MetricsMemory, collectors.MetricsScheduler)))
+ return reg
+}
+
+func newProvider(reg *prometheus.Registry) *sdk.MeterProvider {
+ exporter, err := promOtel.New(promOtel.WithRegisterer(reg))
+
+ if err != nil {
+ panic(err)
+ }
+
+ return sdk.NewMeterProvider(sdk.WithReader(exporter))
+}
+
+func newTelemetry() telemetry.Component {
+ return &telemetryImpl{
+ mutex: &mutex,
+ registry: registry,
+ meterProvider: provider,
+
+ defaultRegistry: defaultRegistry,
+ }
+}
+
+// GetCompatComponent returns a component wrapping telemetry global variables
+// TODO (components): Remove this when all telemetry is migrated to the component
+func GetCompatComponent() telemetry.Component {
+ return newTelemetry()
+}
+
+func (t *telemetryImpl) Handler() http.Handler {
+ return promhttp.HandlerFor(t.registry, promhttp.HandlerOpts{})
+}
+
+func (t *telemetryImpl) Reset() {
+ mutex.Lock()
+ defer mutex.Unlock()
+ registry = prometheus.NewRegistry()
+ t.registry = registry
+}
+
+// RegisterCollector registers a Collector with the prometheus registry
+func (t *telemetryImpl) RegisterCollector(c prometheus.Collector) {
+ registry.MustRegister(c)
+}
+
+// UnregisterCollector unregisters a Collector with the prometheus registry
+func (t *telemetryImpl) UnregisterCollector(c prometheus.Collector) bool {
+ return registry.Unregister(c)
+}
+
+func (t *telemetryImpl) Meter(name string, opts ...telemetry.MeterOption) metric.Meter {
+ return t.meterProvider.Meter(name, opts...)
+}
+
+func (t *telemetryImpl) NewCounter(subsystem, name string, tags []string, help string) telemetry.Counter {
+ return t.NewCounterWithOpts(subsystem, name, tags, help, telemetry.DefaultOptions)
+}
+
+func (t *telemetryImpl) NewCounterWithOpts(subsystem, name string, tags []string, help string, opts telemetry.Options) telemetry.Counter {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ name = opts.NameWithSeparator(subsystem, name)
+
+ c := &promCounter{
+ pc: prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ },
+ tags,
+ ),
+ }
+ t.mustRegister(c.pc, opts)
+ return c
+}
+
+func (t *telemetryImpl) NewSimpleCounter(subsystem, name, help string) telemetry.SimpleCounter {
+ return t.NewSimpleCounterWithOpts(subsystem, name, help, telemetry.DefaultOptions)
+}
+
+func (t *telemetryImpl) NewSimpleCounterWithOpts(subsystem, name, help string, opts telemetry.Options) telemetry.SimpleCounter {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ name = opts.NameWithSeparator(subsystem, name)
+
+ pc := prometheus.NewCounter(prometheus.CounterOpts{
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ })
+
+ t.mustRegister(pc, opts)
+ return &simplePromCounter{c: pc}
+}
+
+func (t *telemetryImpl) NewGauge(subsystem, name string, tags []string, help string) telemetry.Gauge {
+ return t.NewGaugeWithOpts(subsystem, name, tags, help, telemetry.DefaultOptions)
+}
+
+func (t *telemetryImpl) NewGaugeWithOpts(subsystem, name string, tags []string, help string, opts telemetry.Options) telemetry.Gauge {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ name = opts.NameWithSeparator(subsystem, name)
+
+ g := &promGauge{
+ pg: prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ },
+ tags,
+ ),
+ }
+ t.mustRegister(g.pg, opts)
+ return g
+}
+
+func (t *telemetryImpl) NewSimpleGauge(subsystem, name, help string) telemetry.SimpleGauge {
+ return t.NewSimpleGaugeWithOpts(subsystem, name, help, telemetry.DefaultOptions)
+}
+
+func (t *telemetryImpl) NewSimpleGaugeWithOpts(subsystem, name, help string, opts telemetry.Options) telemetry.SimpleGauge {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ name = opts.NameWithSeparator(subsystem, name)
+
+ pc := &simplePromGauge{g: prometheus.NewGauge(prometheus.GaugeOpts{
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ })}
+
+ t.mustRegister(pc.g, opts)
+ return pc
+}
+
+func (t *telemetryImpl) NewHistogram(subsystem, name string, tags []string, help string, buckets []float64) telemetry.Histogram {
+ return t.NewHistogramWithOpts(subsystem, name, tags, help, buckets, telemetry.DefaultOptions)
+}
+
+func (t *telemetryImpl) NewHistogramWithOpts(subsystem, name string, tags []string, help string, buckets []float64, opts telemetry.Options) telemetry.Histogram {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ name = opts.NameWithSeparator(subsystem, name)
+
+ h := &promHistogram{
+ ph: prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ Buckets: buckets,
+ },
+ tags,
+ ),
+ }
+
+ t.mustRegister(h.ph, opts)
+
+ return h
+}
+
+func (t *telemetryImpl) NewSimpleHistogram(subsystem, name, help string, buckets []float64) telemetry.SimpleHistogram {
+ return t.NewSimpleHistogramWithOpts(subsystem, name, help, buckets, telemetry.DefaultOptions)
+}
+
+func (t *telemetryImpl) NewSimpleHistogramWithOpts(subsystem, name, help string, buckets []float64, opts telemetry.Options) telemetry.SimpleHistogram {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ name = opts.NameWithSeparator(subsystem, name)
+
+ pc := &simplePromHistogram{h: prometheus.NewHistogram(prometheus.HistogramOpts{
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ Buckets: buckets,
+ })}
+
+ t.mustRegister(pc.h, opts)
+ return pc
+}
+
+func (t *telemetryImpl) mustRegister(c prometheus.Collector, opts telemetry.Options) {
+ if opts.DefaultMetric {
+ t.defaultRegistry.MustRegister(c)
+ } else {
+ t.registry.MustRegister(c)
+ }
+}
+
+func (t *telemetryImpl) Gather(defaultGather bool) ([]*telemetry.MetricFamily, error) {
+ if defaultGather {
+ return t.defaultRegistry.Gather()
+ }
+
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ return registry.Gather()
+}
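+
+// Example (illustrative sketch, not part of the upstream file): a caller could
+// obtain the compatibility component, create a counter and expose the metrics
+// handler; the subsystem and metric names below are hypothetical.
+//
+//	tel := GetCompatComponent()
+//	_ = tel.NewCounter("logs_agent", "processed_logs", []string{"source"}, "Count of processed log lines")
+//	http.Handle("/telemetry", tel.Handler())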
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/telemtry_mock.go b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/telemtry_mock.go
new file mode 100644
index 0000000000..a812ca8df6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl/telemtry_mock.go
@@ -0,0 +1,54 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build test
+
+package telemetryimpl
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+ "github.com/prometheus/client_golang/prometheus"
+ sdk "go.opentelemetry.io/otel/sdk/metric"
+ "go.uber.org/fx"
+)
+
+// MockModule defines the fx options for the mock component.
+func MockModule() fxutil.Module {
+ return fxutil.Component(
+ fx.Provide(newMock),
+ fx.Provide(func(m telemetry.Mock) telemetry.Component { return m }))
+}
+
+type telemetryImplMock struct {
+ telemetryImpl
+}
+
+func newMock() telemetry.Mock {
+ reg := prometheus.NewRegistry()
+ provider := newProvider(reg)
+
+ telemetry := &telemetryImplMock{
+ telemetryImpl{
+ mutex: &mutex,
+ registry: reg,
+ meterProvider: provider,
+ },
+ }
+
+ return telemetry
+}
+
+func (t *telemetryImplMock) GetRegistry() *prometheus.Registry {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ return t.registry
+}
+
+func (t *telemetryImplMock) GetMeterProvider() *sdk.MeterProvider {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ return t.meterProvider
+}
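+
+// Example (illustrative sketch): in a test build, the mock's private registry
+// can be gathered directly to assert on emitted metrics; the type assertion
+// relies on newMock returning a *telemetryImplMock.
+//
+//	m := newMock()
+//	families, err := m.(*telemetryImplMock).GetRegistry().Gather()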
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/def/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/def/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/def/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/def/lifecycle.go b/vendor/github.com/DataDog/datadog-agent/comp/def/lifecycle.go
new file mode 100644
index 0000000000..756dd51c20
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/def/lifecycle.go
@@ -0,0 +1,24 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package compdef defines basic types used for components
+package compdef
+
+import (
+ "context"
+)
+
+type lchFunc func(context.Context) error
+
+// Hook represents a function pair for a component's startup and shutdown
+type Hook struct {
+ OnStart lchFunc
+ OnStop lchFunc
+}
+
+// Lifecycle may be added to a component's requires struct if it wants to add hooks
+type Lifecycle interface {
+ Append(h Hook)
+}
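+
+// Example (illustrative sketch): a component constructor that receives a
+// Lifecycle in its requires struct could register start/stop work like this;
+// the surrounding Component type and constructor are hypothetical.
+//
+//	func newComponent(lc Lifecycle) Component {
+//		lc.Append(Hook{
+//			OnStart: func(ctx context.Context) error { return nil },
+//			OnStop:  func(ctx context.Context) error { return nil },
+//		})
+//		return Component{}
+//	}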
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/def/type.go b/vendor/github.com/DataDog/datadog-agent/comp/def/type.go
new file mode 100644
index 0000000000..87581b71a0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/def/type.go
@@ -0,0 +1,10 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package compdef defines basic types used for components
+package compdef
+
+// Out can be put in a struct that represents a collection of Components
+type Out struct{}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/channel_message.go b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/channel_message.go
new file mode 100644
index 0000000000..956e5eaa57
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/channel_message.go
@@ -0,0 +1,41 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package config
+
+import "time"
+
+// ChannelMessage represents a log line sent to datadog, with its metadata
+type ChannelMessage struct {
+ Content []byte
+ // Optional. Must be UTC. If not provided, time.Now().UTC() will be used
+ // Used in the Serverless Agent
+ Timestamp time.Time
+ // Optional.
+ // Used in the Serverless Agent
+ Lambda *Lambda
+ IsError bool
+}
+
+// Lambda is a struct storing information about the Lambda function and function execution.
+type Lambda struct {
+ ARN string
+ RequestID string
+ FunctionName string
+}
+
+// NewChannelMessageFromLambda constructs a message with the given content, timestamp, and Lambda metadata
+func NewChannelMessageFromLambda(content []byte, utcTime time.Time, ARN, reqID string, isError bool) *ChannelMessage {
+ return &ChannelMessage{
+ Content: content,
+ Timestamp: utcTime,
+ Lambda: &Lambda{
+ ARN: ARN,
+ RequestID: reqID,
+ },
+ IsError: isError,
+ }
+}
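+
+// Example (illustrative): a serverless log forwarder would typically build a
+// message like this; the log line, ARN and request ID below are placeholders.
+//
+//	msg := NewChannelMessageFromLambda(
+//		[]byte("START RequestId: 8476a536 Version: $LATEST"),
+//		time.Now().UTC(),
+//		"arn:aws:lambda:us-east-1:123456789012:function:my-function",
+//		"8476a536-e9f4-11e8-9739-2dfe598c3fcd",
+//		false,
+//	)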
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/config.go b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/config.go
new file mode 100644
index 0000000000..5c188e2139
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/config.go
@@ -0,0 +1,345 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ pkgconfigutils "github.com/DataDog/datadog-agent/pkg/config/utils"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// ContainerCollectAll is the name of the docker integration that collects logs from all containers
+const ContainerCollectAll = "container_collect_all"
+
+// logs-intake endpoint prefixes.
+const (
+ tcpEndpointPrefix = "agent-intake.logs."
+ httpEndpointPrefix = "agent-http-intake.logs."
+ serverlessHTTPEndpointPrefix = "http-intake.logs."
+)
+
+// AgentJSONIntakeProtocol is the agent JSON intake protocol
+const AgentJSONIntakeProtocol = "agent-json"
+
+// DefaultIntakeProtocol indicates that no special protocol is in use for the endpoint intake track type.
+const DefaultIntakeProtocol IntakeProtocol = ""
+
+// DefaultIntakeOrigin indicates that no special DD_SOURCE header is in use for the endpoint intake track type.
+const DefaultIntakeOrigin IntakeOrigin = "agent"
+
+// ServerlessIntakeOrigin is the lambda extension origin
+const ServerlessIntakeOrigin IntakeOrigin = "lambda-extension"
+
+// logs-intake endpoints depending on the site and environment.
+var logsEndpoints = map[string]int{
+ "agent-intake.logs.datadoghq.com": 10516,
+ "agent-intake.logs.datadoghq.eu": 443,
+ "agent-intake.logs.datad0g.com": 10516,
+ "agent-intake.logs.datad0g.eu": 443,
+}
+
+// HTTPConnectivity is the status of the HTTP connectivity
+type HTTPConnectivity bool
+
+var (
+ // HTTPConnectivitySuccess is the status for successful HTTP connectivity
+ HTTPConnectivitySuccess HTTPConnectivity = true
+ // HTTPConnectivityFailure is the status for failed HTTP connectivity
+ HTTPConnectivityFailure HTTPConnectivity = false
+)
+
+// GlobalProcessingRules returns the global processing rules to apply to all logs.
+func GlobalProcessingRules(coreConfig pkgconfigmodel.Reader) ([]*ProcessingRule, error) {
+ var rules []*ProcessingRule
+ var err error
+ raw := coreConfig.Get("logs_config.processing_rules")
+ if raw == nil {
+ return rules, nil
+ }
+ if s, ok := raw.(string); ok && s != "" {
+ err = json.Unmarshal([]byte(s), &rules)
+ } else {
+ err = coreConfig.UnmarshalKey("logs_config.processing_rules", &rules)
+ }
+ if err != nil {
+ return nil, err
+ }
+ err = ValidateProcessingRules(rules)
+ if err != nil {
+ return nil, err
+ }
+ err = CompileProcessingRules(rules)
+ if err != nil {
+ return nil, err
+ }
+ return rules, nil
+}
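+
+// Example (illustrative): processing rules are usually provided as YAML under
+// logs_config.processing_rules, or as a JSON string when set through an
+// environment variable, e.g.
+//
+//	logs_config:
+//	  processing_rules:
+//	    - type: exclude_at_match
+//	      name: exclude_healthchecks
+//	      pattern: "GET /health"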
+
+// HasMultiLineRule returns true if the rule set contains a multi_line rule
+func HasMultiLineRule(rules []*ProcessingRule) bool {
+ for _, rule := range rules {
+ if rule.Type == MultiLine {
+ return true
+ }
+ }
+ return false
+}
+
+// BuildEndpoints returns the endpoints to send logs.
+func BuildEndpoints(coreConfig pkgconfigmodel.Reader, httpConnectivity HTTPConnectivity, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error) {
+ return BuildEndpointsWithConfig(coreConfig, defaultLogsConfigKeys(coreConfig), httpEndpointPrefix, httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin)
+}
+
+// BuildEndpointsWithVectorOverride returns the endpoints to send logs and enforce Vector override config keys
+func BuildEndpointsWithVectorOverride(coreConfig pkgconfigmodel.Reader, httpConnectivity HTTPConnectivity, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error) {
+ return BuildEndpointsWithConfig(coreConfig, defaultLogsConfigKeysWithVectorOverride(coreConfig), httpEndpointPrefix, httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin)
+}
+
+// BuildEndpointsWithConfig returns the endpoints to send logs.
+func BuildEndpointsWithConfig(coreConfig pkgconfigmodel.Reader, logsConfig *LogsConfigKeys, endpointPrefix string, httpConnectivity HTTPConnectivity, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error) {
+ if logsConfig.devModeNoSSL() {
+ log.Warnf("Use of illegal configuration parameter, if you need to send your logs to a proxy, "+
+ "please use '%s' and '%s' instead", logsConfig.getConfigKey("logs_dd_url"), logsConfig.getConfigKey("logs_no_ssl"))
+ }
+
+ mrfEnabled := coreConfig.GetBool("multi_region_failover.enabled")
+ if logsConfig.isForceHTTPUse() || logsConfig.obsPipelineWorkerEnabled() || mrfEnabled || (bool(httpConnectivity) && !(logsConfig.isForceTCPUse() || logsConfig.isSocks5ProxySet() || logsConfig.hasAdditionalEndpoints())) {
+ return BuildHTTPEndpointsWithConfig(coreConfig, logsConfig, endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin)
+ }
+ log.Warnf("You are currently sending Logs to Datadog through TCP (either because %s or %s is set or the HTTP connectivity test has failed) "+
+ "To benefit from increased reliability and better network performances, "+
+ "we strongly encourage switching over to compressed HTTPS which is now the default protocol.",
+ logsConfig.getConfigKey("force_use_tcp"), logsConfig.getConfigKey("socks5_proxy_address"))
+ return buildTCPEndpoints(coreConfig, logsConfig)
+}
+
+// BuildServerlessEndpoints returns the endpoints to send logs for the Serverless agent.
+func BuildServerlessEndpoints(coreConfig pkgconfigmodel.Reader, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol) (*Endpoints, error) {
+ return BuildHTTPEndpointsWithConfig(coreConfig, defaultLogsConfigKeysWithVectorOverride(coreConfig), serverlessHTTPEndpointPrefix, intakeTrackType, intakeProtocol, ServerlessIntakeOrigin)
+}
+
+// ExpectedTagsDuration returns the duration for which expected tags will be submitted.
+func ExpectedTagsDuration(coreConfig pkgconfigmodel.Reader) time.Duration {
+ return defaultLogsConfigKeys(coreConfig).expectedTagsDuration()
+}
+
+// IsExpectedTagsSet returns a boolean indicating whether the expected tags feature is enabled.
+func IsExpectedTagsSet(coreConfig pkgconfigmodel.Reader) bool {
+ return ExpectedTagsDuration(coreConfig) > 0
+}
+
+func buildTCPEndpoints(coreConfig pkgconfigmodel.Reader, logsConfig *LogsConfigKeys) (*Endpoints, error) {
+ useProto := logsConfig.devModeUseProto()
+ main := NewTCPEndpoint(logsConfig)
+
+ if logsDDURL, defined := logsConfig.logsDDURL(); defined {
+		// Proxy settings: 'logs_config.logs_dd_url' is expected to follow the format '<host>:<port>',
+		// where '<port>' is an integer.
+		// By default SSL is enabled; to disable SSL set 'logs_config.logs_no_ssl' to true.
+ host, port, err := parseAddress(logsDDURL)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse %s: %v", logsDDURL, err)
+ }
+ main.Host = host
+ main.Port = port
+ main.useSSL = !logsConfig.logsNoSSL()
+ } else if logsConfig.usePort443() {
+ main.Host = logsConfig.ddURL443()
+ main.Port = 443
+ main.useSSL = true
+ } else {
+ // If no proxy is set, we default to 'logs_config.dd_url' if set, or to 'site'.
+ // if none of them is set, we default to the US agent endpoint.
+ main.Host = pkgconfigutils.GetMainEndpoint(coreConfig, tcpEndpointPrefix, logsConfig.getConfigKey("dd_url"))
+ if port, found := logsEndpoints[main.Host]; found {
+ main.Port = port
+ } else {
+ main.Port = logsConfig.ddPort()
+ }
+ main.useSSL = !logsConfig.devModeNoSSL()
+ }
+
+ additionals := loadTCPAdditionalEndpoints(main, logsConfig)
+ return NewEndpoints(main, additionals, useProto, false), nil
+}
+
+// BuildHTTPEndpoints returns the HTTP endpoints to send logs to.
+func BuildHTTPEndpoints(coreConfig pkgconfigmodel.Reader, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error) {
+ return BuildHTTPEndpointsWithConfig(coreConfig, defaultLogsConfigKeys(coreConfig), httpEndpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin)
+}
+
+// BuildHTTPEndpointsWithVectorOverride returns the HTTP endpoints to send logs to.
+func BuildHTTPEndpointsWithVectorOverride(coreConfig pkgconfigmodel.Reader, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error) {
+ return BuildHTTPEndpointsWithConfig(coreConfig, defaultLogsConfigKeysWithVectorOverride(coreConfig), httpEndpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin)
+}
+
+// BuildHTTPEndpointsWithConfig takes a set of configuration keys and an endpoint prefix and returns the HTTP endpoints to send logs to. Calling it with the default key set and httpEndpointPrefix is equivalent to BuildHTTPEndpoints().
+func BuildHTTPEndpointsWithConfig(coreConfig pkgconfigmodel.Reader, logsConfig *LogsConfigKeys, endpointPrefix string, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) (*Endpoints, error) {
+ // Provide default values for legacy settings when the configuration key does not exist
+ defaultNoSSL := logsConfig.logsNoSSL()
+
+ main := NewHTTPEndpoint(logsConfig)
+
+ if logsConfig.useV2API() && intakeTrackType != "" {
+ main.Version = EPIntakeVersion2
+ main.TrackType = intakeTrackType
+ main.Protocol = intakeProtocol
+ main.Origin = intakeOrigin
+ } else {
+ main.Version = EPIntakeVersion1
+ }
+
+ if vectorURL, vectorURLDefined := logsConfig.getObsPipelineURL(); logsConfig.obsPipelineWorkerEnabled() && vectorURLDefined {
+ host, port, useSSL, err := parseAddressWithScheme(vectorURL, defaultNoSSL, parseAddress)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse %s: %v", vectorURL, err)
+ }
+ main.Host = host
+ main.Port = port
+ main.useSSL = useSSL
+ } else if logsDDURL, logsDDURLDefined := logsConfig.logsDDURL(); logsDDURLDefined {
+ host, port, useSSL, err := parseAddressWithScheme(logsDDURL, defaultNoSSL, parseAddress)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse %s: %v", logsDDURL, err)
+ }
+ main.Host = host
+ main.Port = port
+ main.useSSL = useSSL
+ } else {
+ addr := pkgconfigutils.GetMainEndpoint(coreConfig, endpointPrefix, logsConfig.getConfigKey("dd_url"))
+ host, port, useSSL, err := parseAddressWithScheme(addr, logsConfig.devModeNoSSL(), parseAddressAsHost)
+ if err != nil {
+			return nil, fmt.Errorf("could not parse %s: %v", addr, err)
+ }
+
+ main.Host = host
+ main.Port = port
+ main.useSSL = useSSL
+ }
+
+ additionals := loadHTTPAdditionalEndpoints(main, logsConfig, intakeTrackType, intakeProtocol, intakeOrigin)
+
+ // Add in the MRF endpoint if MRF is enabled.
+ if coreConfig.GetBool("multi_region_failover.enabled") {
+ mrfURL, err := pkgconfigutils.GetMRFEndpoint(coreConfig, endpointPrefix, "multi_region_failover.dd_url")
+ if err != nil {
+ return nil, fmt.Errorf("cannot construct MRF endpoint: %s", err)
+ }
+
+ mrfHost, mrfPort, mrfUseSSL, err := parseAddressWithScheme(mrfURL, defaultNoSSL, parseAddressAsHost)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse %s: %v", mrfURL, err)
+ }
+
+ e := NewEndpoint(coreConfig.GetString("multi_region_failover.api_key"), mrfHost, mrfPort, mrfUseSSL)
+ e.IsMRF = true
+ e.UseCompression = main.UseCompression
+ e.CompressionLevel = main.CompressionLevel
+ e.BackoffBase = main.BackoffBase
+ e.BackoffMax = main.BackoffMax
+ e.BackoffFactor = main.BackoffFactor
+ e.RecoveryInterval = main.RecoveryInterval
+ e.RecoveryReset = main.RecoveryReset
+ e.Version = main.Version
+ e.TrackType = intakeTrackType
+ e.Protocol = intakeProtocol
+ e.Origin = intakeOrigin
+
+ additionals = append(additionals, e)
+ }
+
+ batchWait := logsConfig.batchWait()
+ batchMaxConcurrentSend := logsConfig.batchMaxConcurrentSend()
+ batchMaxSize := logsConfig.batchMaxSize()
+ batchMaxContentSize := logsConfig.batchMaxContentSize()
+ inputChanSize := logsConfig.inputChanSize()
+
+ return NewEndpointsWithBatchSettings(main, additionals, false, true, batchWait, batchMaxConcurrentSend, batchMaxSize, batchMaxContentSize, inputChanSize), nil
+}
+
+type defaultParseAddressFunc func(string) (host string, port int, err error)
+
+func parseAddressWithScheme(address string, defaultNoSSL bool, defaultParser defaultParseAddressFunc) (host string, port int, useSSL bool, err error) {
+ if strings.HasPrefix(address, "https://") || strings.HasPrefix(address, "http://") {
+		if strings.HasPrefix(address, "https://") && defaultNoSSL {
+ log.Warn("dd_url set to a URL with an HTTPS prefix and logs_no_ssl set to true. These are conflicting options. In a future release logs_no_ssl will override the dd_url prefix.")
+ }
+ host, port, useSSL, err = parseURL(address)
+ } else {
+ host, port, err = defaultParser(address)
+ if err != nil {
+ err = fmt.Errorf("could not parse %s: %v", address, err)
+ return
+ }
+ useSSL = !defaultNoSSL
+ }
+ return
+}
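+
+// Illustrative behavior of parseAddressWithScheme with defaultNoSSL=false and
+// parseAddress as the fallback parser:
+//
+//	"https://agent-http-intake.logs.datadoghq.com:443" -> host, 443, useSSL=true  (scheme decides SSL)
+//	"agent-http-intake.logs.datadoghq.com:10516"       -> host, 10516, useSSL=true (no scheme, SSL on by default)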
+
+func parseURL(address string) (host string, port int, useSSL bool, err error) {
+ u, errParse := url.Parse(address)
+ if errParse != nil {
+ err = errParse
+ return
+ }
+ switch u.Scheme {
+ case "https":
+ useSSL = true
+ case "http":
+ useSSL = false
+ }
+ host = u.Hostname()
+ if u.Port() != "" {
+ port, err = strconv.Atoi(u.Port())
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+// parseAddress returns the host and the port of the address.
+func parseAddress(address string) (string, int, error) {
+ host, portString, err := net.SplitHostPort(address)
+ if err != nil {
+ return "", 0, err
+ }
+ port, err := strconv.Atoi(portString)
+ if err != nil {
+ return "", 0, err
+ }
+ return host, port, nil
+}
+
+// parseAddressAsHost returns the host and the port of the address.
+// this function consider that the address is the host
+func parseAddressAsHost(address string) (string, int, error) {
+ return address, 0, nil
+}
+
+// TaggerWarmupDuration is used to configure the tag providers
+func TaggerWarmupDuration(coreConfig pkgconfigmodel.Reader) time.Duration {
+ return defaultLogsConfigKeys(coreConfig).taggerWarmupDuration()
+}
+
+// AggregationTimeout is used when performing aggregation operations
+func AggregationTimeout(coreConfig pkgconfigmodel.Reader) time.Duration {
+ return defaultLogsConfigKeys(coreConfig).aggregationTimeout()
+}
+
+// MaxMessageSizeBytes is used to cap the maximum log message size in bytes
+func MaxMessageSizeBytes(coreConfig pkgconfigmodel.Reader) int {
+ return defaultLogsConfigKeys(coreConfig).maxMessageSizeBytes()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/config_keys.go b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/config_keys.go
new file mode 100644
index 0000000000..df5ee1ad4a
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/config_keys.go
@@ -0,0 +1,303 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import (
+ "encoding/json"
+ "time"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// LogsConfigKeys stores logs configuration keys stored in YAML configuration files
+type LogsConfigKeys struct {
+ prefix string
+ vectorPrefix string
+ config pkgconfigmodel.Reader
+}
+
+// defaultLogsConfigKeys defines the default YAML keys used to retrieve logs configuration
+func defaultLogsConfigKeys(config pkgconfigmodel.Reader) *LogsConfigKeys {
+ return NewLogsConfigKeys("logs_config.", config)
+}
+
+// defaultLogsConfigKeysWithVectorOverride defines the default YAML keys used to retrieve logs configuration, with the Vector override keys enabled
+func defaultLogsConfigKeysWithVectorOverride(config pkgconfigmodel.Reader) *LogsConfigKeys {
+ return NewLogsConfigKeysWithVector("logs_config.", "logs.", config)
+}
+
+// NewLogsConfigKeys returns a new logs configuration keys set
+func NewLogsConfigKeys(configPrefix string, config pkgconfigmodel.Reader) *LogsConfigKeys {
+ return &LogsConfigKeys{prefix: configPrefix, vectorPrefix: "", config: config}
+}
+
+// NewLogsConfigKeysWithVector returns a new logs configuration keys set with vector config keys enabled
+func NewLogsConfigKeysWithVector(configPrefix, vectorPrefix string, config pkgconfigmodel.Reader) *LogsConfigKeys {
+ return &LogsConfigKeys{prefix: configPrefix, vectorPrefix: vectorPrefix, config: config}
+}
+
+func (l *LogsConfigKeys) getConfig() pkgconfigmodel.Reader {
+ return l.config
+}
+
+func (l *LogsConfigKeys) getConfigKey(key string) string {
+ return l.prefix + key
+}
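+
+// Example (illustrative): keys are resolved by simple prefixing, so the same
+// logical setting maps to different YAML paths depending on the key set; the
+// second prefix below is hypothetical.
+//
+//	defaultLogsConfigKeys(cfg).getConfigKey("logs_dd_url")                        // "logs_config.logs_dd_url"
+//	NewLogsConfigKeys("my_integration.logs_config.", cfg).getConfigKey("dd_port") // "my_integration.logs_config.dd_port"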
+
+func isSetAndNotEmpty(config pkgconfigmodel.Reader, key string) bool {
+ return config.IsSet(key) && len(config.GetString(key)) > 0
+}
+
+func (l *LogsConfigKeys) isSetAndNotEmpty(key string) bool {
+ return isSetAndNotEmpty(l.getConfig(), key)
+}
+
+func (l *LogsConfigKeys) ddURL443() string {
+ return l.getConfig().GetString(l.getConfigKey("dd_url_443"))
+}
+
+func (l *LogsConfigKeys) logsDDURL() (string, bool) {
+ configKey := l.getConfigKey("logs_dd_url")
+ return l.getConfig().GetString(configKey), l.isSetAndNotEmpty(configKey)
+}
+
+func (l *LogsConfigKeys) ddPort() int {
+ return l.getConfig().GetInt(l.getConfigKey("dd_port"))
+}
+
+func (l *LogsConfigKeys) isSocks5ProxySet() bool {
+ return len(l.socks5ProxyAddress()) > 0
+}
+
+func (l *LogsConfigKeys) socks5ProxyAddress() string {
+ return l.getConfig().GetString(l.getConfigKey("socks5_proxy_address"))
+}
+
+func (l *LogsConfigKeys) isForceTCPUse() bool {
+ return l.getConfig().GetBool(l.getConfigKey("use_tcp")) ||
+ l.getConfig().GetBool(l.getConfigKey("force_use_tcp"))
+}
+
+func (l *LogsConfigKeys) usePort443() bool {
+ return l.getConfig().GetBool(l.getConfigKey("use_port_443"))
+}
+
+func (l *LogsConfigKeys) isForceHTTPUse() bool {
+ return l.getConfig().GetBool(l.getConfigKey("use_http")) ||
+ l.getConfig().GetBool(l.getConfigKey("force_use_http"))
+}
+
+func (l *LogsConfigKeys) logsNoSSL() bool {
+ return l.getConfig().GetBool(l.getConfigKey("logs_no_ssl"))
+}
+
+func (l *LogsConfigKeys) maxMessageSizeBytes() int {
+ return l.getConfig().GetInt(l.getConfigKey("max_message_size_bytes"))
+}
+
+func (l *LogsConfigKeys) devModeNoSSL() bool {
+ return l.getConfig().GetBool(l.getConfigKey("dev_mode_no_ssl"))
+}
+
+func (l *LogsConfigKeys) devModeUseProto() bool {
+ return l.getConfig().GetBool(l.getConfigKey("dev_mode_use_proto"))
+}
+
+func (l *LogsConfigKeys) compressionLevel() int {
+ return l.getConfig().GetInt(l.getConfigKey("compression_level"))
+}
+
+func (l *LogsConfigKeys) useCompression() bool {
+ return l.getConfig().GetBool(l.getConfigKey("use_compression"))
+}
+
+func (l *LogsConfigKeys) hasAdditionalEndpoints() bool {
+ return len(l.getAdditionalEndpoints()) > 0
+}
+
+// getAPIKeyGetter returns a getter function to retrieve the API key from the configuration. The getter will refetch the
+// value from the configuration upon each call to ensure the latest version is used. This ensures that the logs agent is
+// compatible with rotating the API key at runtime.
+//
+// The getter will use "logs_config.api_key" over "api_key" when needed.
+func (l *LogsConfigKeys) getAPIKeyGetter() func() string {
+ path := "api_key"
+ if configKey := l.getConfigKey(path); l.isSetAndNotEmpty(configKey) {
+ path = configKey
+ }
+
+ return func() string {
+ return l.getConfig().GetString(path)
+ }
+}
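+
+// Example (illustrative): because the getter re-reads the configuration on
+// every call, an API key rotated at runtime is picked up without restarting
+// the sender.
+//
+//	get := logsConfig.getAPIKeyGetter()
+//	before := get() // value at call time
+//	// ... the API key is rotated in the configuration ...
+//	after := get()  // re-read; reflects the rotated key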
+
+func (l *LogsConfigKeys) connectionResetInterval() time.Duration {
+ return time.Duration(l.getConfig().GetInt(l.getConfigKey("connection_reset_interval"))) * time.Second
+
+}
+
+func (l *LogsConfigKeys) getAdditionalEndpoints() []unmarshalEndpoint {
+ var endpoints []unmarshalEndpoint
+ var err error
+ configKey := l.getConfigKey("additional_endpoints")
+ raw := l.getConfig().Get(configKey)
+ if raw == nil {
+ return nil
+ }
+ if s, ok := raw.(string); ok && s != "" {
+ err = json.Unmarshal([]byte(s), &endpoints)
+ } else {
+ err = l.getConfig().UnmarshalKey(configKey, &endpoints)
+ }
+ if err != nil {
+ log.Warnf("Could not parse additional_endpoints for logs: %v", err)
+ }
+ return endpoints
+}
+
+func (l *LogsConfigKeys) expectedTagsDuration() time.Duration {
+ return l.getConfig().GetDuration(l.getConfigKey("expected_tags_duration"))
+}
+
+func (l *LogsConfigKeys) taggerWarmupDuration() time.Duration {
+ // note that this multiplies a duration (in ns) by 1 second (in ns), so the user must specify
+ // an integer number of seconds ("5") and not a duration expression ("5s").
+ return l.getConfig().GetDuration(l.getConfigKey("tagger_warmup_duration")) * time.Second
+}
+
+func (l *LogsConfigKeys) batchWait() time.Duration {
+ key := l.getConfigKey("batch_wait")
+ batchWait := l.getConfig().GetInt(key)
+ if batchWait < 1 || 10 < batchWait {
+ log.Warnf("Invalid %s: %v should be in [1, 10], fallback on %v", key, batchWait, pkgconfigsetup.DefaultBatchWait)
+ return pkgconfigsetup.DefaultBatchWait * time.Second
+ }
+ return (time.Duration(batchWait) * time.Second)
+}
+
+func (l *LogsConfigKeys) batchMaxConcurrentSend() int {
+ key := l.getConfigKey("batch_max_concurrent_send")
+ batchMaxConcurrentSend := l.getConfig().GetInt(key)
+ if batchMaxConcurrentSend < 0 {
+ log.Warnf("Invalid %s: %v should be >= 0, fallback on %v", key, batchMaxConcurrentSend, pkgconfigsetup.DefaultBatchMaxConcurrentSend)
+ return pkgconfigsetup.DefaultBatchMaxConcurrentSend
+ }
+ return batchMaxConcurrentSend
+}
+
+func (l *LogsConfigKeys) batchMaxSize() int {
+ key := l.getConfigKey("batch_max_size")
+ batchMaxSize := l.getConfig().GetInt(key)
+ if batchMaxSize <= 0 {
+ log.Warnf("Invalid %s: %v should be > 0, fallback on %v", key, batchMaxSize, pkgconfigsetup.DefaultBatchMaxSize)
+ return pkgconfigsetup.DefaultBatchMaxSize
+ }
+ return batchMaxSize
+}
+
+func (l *LogsConfigKeys) batchMaxContentSize() int {
+ key := l.getConfigKey("batch_max_content_size")
+ batchMaxContentSize := l.getConfig().GetInt(key)
+ if batchMaxContentSize <= 0 {
+ log.Warnf("Invalid %s: %v should be > 0, fallback on %v", key, batchMaxContentSize, pkgconfigsetup.DefaultBatchMaxContentSize)
+ return pkgconfigsetup.DefaultBatchMaxContentSize
+ }
+ return batchMaxContentSize
+}
+
+func (l *LogsConfigKeys) inputChanSize() int {
+ key := l.getConfigKey("input_chan_size")
+ inputChanSize := l.getConfig().GetInt(key)
+ if inputChanSize <= 0 {
+ log.Warnf("Invalid %s: %v should be > 0, fallback on %v", key, inputChanSize, pkgconfigsetup.DefaultInputChanSize)
+ return pkgconfigsetup.DefaultInputChanSize
+ }
+ return inputChanSize
+}
+
+func (l *LogsConfigKeys) senderBackoffFactor() float64 {
+ key := l.getConfigKey("sender_backoff_factor")
+ senderBackoffFactor := l.getConfig().GetFloat64(key)
+ if senderBackoffFactor < 2 {
+ log.Warnf("Invalid %s: %v should be >= 2, fallback on %v", key, senderBackoffFactor, pkgconfigsetup.DefaultLogsSenderBackoffFactor)
+ return pkgconfigsetup.DefaultLogsSenderBackoffFactor
+ }
+ return senderBackoffFactor
+}
+
+func (l *LogsConfigKeys) senderBackoffBase() float64 {
+ key := l.getConfigKey("sender_backoff_base")
+ senderBackoffBase := l.getConfig().GetFloat64(key)
+ if senderBackoffBase <= 0 {
+ log.Warnf("Invalid %s: %v should be > 0, fallback on %v", key, senderBackoffBase, pkgconfigsetup.DefaultLogsSenderBackoffBase)
+ return pkgconfigsetup.DefaultLogsSenderBackoffBase
+ }
+ return senderBackoffBase
+}
+
+func (l *LogsConfigKeys) senderBackoffMax() float64 {
+ key := l.getConfigKey("sender_backoff_max")
+ senderBackoffMax := l.getConfig().GetFloat64(key)
+ if senderBackoffMax <= 0 {
+ log.Warnf("Invalid %s: %v should be > 0, fallback on %v", key, senderBackoffMax, pkgconfigsetup.DefaultLogsSenderBackoffMax)
+ return pkgconfigsetup.DefaultLogsSenderBackoffMax
+ }
+ return senderBackoffMax
+}
+
+func (l *LogsConfigKeys) senderRecoveryInterval() int {
+ key := l.getConfigKey("sender_recovery_interval")
+ recoveryInterval := l.getConfig().GetInt(key)
+ if recoveryInterval <= 0 {
+ log.Warnf("Invalid %s: %v should be > 0, fallback on %v", key, recoveryInterval, pkgconfigsetup.DefaultLogsSenderBackoffRecoveryInterval)
+ return pkgconfigsetup.DefaultLogsSenderBackoffRecoveryInterval
+ }
+ return recoveryInterval
+}
+
+func (l *LogsConfigKeys) senderRecoveryReset() bool {
+ return l.getConfig().GetBool(l.getConfigKey("sender_recovery_reset"))
+}
+
+// AggregationTimeout is used when performing aggregation operations
+func (l *LogsConfigKeys) aggregationTimeout() time.Duration {
+ return l.getConfig().GetDuration(l.getConfigKey("aggregation_timeout")) * time.Millisecond
+}
+
+func (l *LogsConfigKeys) useV2API() bool {
+ return l.getConfig().GetBool(l.getConfigKey("use_v2_api"))
+}
+
+func (l *LogsConfigKeys) getObsPipelineConfigKey(configPrefix string, key string) string {
+ return configPrefix + "." + l.vectorPrefix + key
+}
+
+func (l *LogsConfigKeys) obsPipelineWorkerEnabled() bool {
+ if l.vectorPrefix == "" {
+ return false
+ }
+ if l.getConfig().GetBool(l.getObsPipelineConfigKey("observability_pipelines_worker", "enabled")) {
+ return true
+ }
+ return l.getConfig().GetBool(l.getObsPipelineConfigKey("vector", "enabled"))
+}
+
+func (l *LogsConfigKeys) getObsPipelineURL() (string, bool) {
+ if l.vectorPrefix != "" {
+ configKey := l.getObsPipelineConfigKey("observability_pipelines_worker", "url")
+ if l.isSetAndNotEmpty(configKey) {
+ return l.getConfig().GetString(configKey), true
+ }
+
+ configKey = l.getObsPipelineConfigKey("vector", "url")
+ if l.isSetAndNotEmpty(configKey) {
+ return l.getConfig().GetString(configKey), true
+ }
+ }
+ return "", false
+}
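+
+// Example (illustrative): with the Vector override key set ("logs."), the
+// worker URL is looked up first under the observability_pipelines_worker
+// namespace and then under the legacy vector namespace:
+//
+//	observability_pipelines_worker.logs.url
+//	vector.logs.url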
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/constants.go b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/constants.go
new file mode 100644
index 0000000000..2fd39527b3
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/constants.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+// Pipeline constraints
+const (
+ ChanSize = 100
+ DestinationPayloadChanSize = 10
+ NumberOfPipelines = 4
+)
+
+const (
+ // DateFormat is the default date format.
+ DateFormat = "2006-01-02T15:04:05.000000000Z"
+)
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/endpoints.go b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/endpoints.go
new file mode 100644
index 0000000000..58ce82d312
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/endpoints.go
@@ -0,0 +1,320 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import (
+ "fmt"
+ "time"
+
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+ pkgconfigutils "github.com/DataDog/datadog-agent/pkg/config/utils"
+)
+
+// EPIntakeVersion is the events platform intake API version
+type EPIntakeVersion uint8
+
+// IntakeTrackType indicates the type of an endpoint intake.
+type IntakeTrackType string
+
+// IntakeProtocol indicates the protocol to use for an endpoint intake.
+type IntakeProtocol string
+
+// IntakeOrigin indicates the log source to use for an endpoint intake.
+type IntakeOrigin string
+
+const (
+ _ EPIntakeVersion = iota
+ // EPIntakeVersion1 is version 1 of the events platform intake API
+ EPIntakeVersion1
+ // EPIntakeVersion2 is version 2 of the events platform intake API
+ EPIntakeVersion2
+)
+
+// Endpoint holds all the organization and network parameters to send logs to Datadog.
+type Endpoint struct {
+ apiKeyGetter func() string
+ isReliable bool
+ useSSL bool
+
+ Host string `mapstructure:"host" json:"host"`
+ Port int
+ UseCompression bool `mapstructure:"use_compression" json:"use_compression"`
+ CompressionLevel int `mapstructure:"compression_level" json:"compression_level"`
+ ProxyAddress string
+ IsMRF bool `mapstructure:"-" json:"-"`
+ ConnectionResetInterval time.Duration
+
+ BackoffFactor float64
+ BackoffBase float64
+ BackoffMax float64
+ RecoveryInterval int
+ RecoveryReset bool
+
+ Version EPIntakeVersion
+ TrackType IntakeTrackType
+ Protocol IntakeProtocol
+ Origin IntakeOrigin
+}
+
+// unmarshalEndpoint is used to load additional endpoints from the configuration, which are stored as JSON/mapstructure.
+// A different type than Endpoint is used since we want some fields to be private in Endpoint (APIKey, IsReliable, ...).
+type unmarshalEndpoint struct {
+ APIKey string `mapstructure:"api_key" json:"api_key"`
+ IsReliable *bool `mapstructure:"is_reliable" json:"is_reliable"`
+ UseSSL *bool `mapstructure:"use_ssl" json:"use_ssl"`
+
+ Endpoint `mapstructure:",squash"`
+}
+
+// NewEndpoint returns a new Endpoint with the minimal fields initialized.
+func NewEndpoint(apiKey string, host string, port int, useSSL bool) Endpoint {
+ apiKey = pkgconfigutils.SanitizeAPIKey(apiKey)
+ return Endpoint{
+ apiKeyGetter: func() string { return apiKey },
+ Host: host,
+ Port: port,
+ useSSL: useSSL,
+ isReliable: true, // by default endpoints are reliable
+ }
+}
+
+// NewTCPEndpoint returns a new TCP Endpoint based on LogsConfigKeys. The endpoint is reliable by default and uses the
+// SOCKS proxy and SSL settings from the configuration.
+func NewTCPEndpoint(logsConfig *LogsConfigKeys) Endpoint {
+ return Endpoint{
+ apiKeyGetter: logsConfig.getAPIKeyGetter(),
+ ProxyAddress: logsConfig.socks5ProxyAddress(),
+ ConnectionResetInterval: logsConfig.connectionResetInterval(),
+ useSSL: logsConfig.logsNoSSL(),
+ isReliable: true, // by default endpoints are reliable
+ }
+}
+
+// NewHTTPEndpoint returns a new HTTP Endpoint based on LogsConfigKeys. The endpoint is reliable by default and uses the
+// HTTP-related settings from the configuration (compression, backoff, recovery, ...).
+func NewHTTPEndpoint(logsConfig *LogsConfigKeys) Endpoint {
+ return Endpoint{
+ apiKeyGetter: logsConfig.getAPIKeyGetter(),
+ UseCompression: logsConfig.useCompression(),
+ CompressionLevel: logsConfig.compressionLevel(),
+ ConnectionResetInterval: logsConfig.connectionResetInterval(),
+ BackoffBase: logsConfig.senderBackoffBase(),
+ BackoffMax: logsConfig.senderBackoffMax(),
+ BackoffFactor: logsConfig.senderBackoffFactor(),
+ RecoveryInterval: logsConfig.senderRecoveryInterval(),
+ RecoveryReset: logsConfig.senderRecoveryReset(),
+ useSSL: logsConfig.logsNoSSL(),
+ isReliable: true, // by default endpoints are reliable
+ }
+}
+
+// The setting from 'logs_config.additional_endpoints' is unmarshalled directly from the configuration into a
+// []unmarshalEndpoint and does not use the constructors. In this case, apiKeyGetter is initialized to return the API
+// key from the loaded data instead of 'api_key'/'logs_config.api_key'.
+
+func loadTCPAdditionalEndpoints(main Endpoint, l *LogsConfigKeys) []Endpoint {
+ additionals := l.getAdditionalEndpoints()
+
+ newEndpoints := make([]Endpoint, 0, len(additionals))
+ for _, e := range additionals {
+ newE := NewEndpoint(e.APIKey, e.Host, e.Port, false)
+
+ newE.UseCompression = e.UseCompression
+ newE.CompressionLevel = e.CompressionLevel
+ newE.ProxyAddress = l.socks5ProxyAddress()
+ newE.isReliable = e.IsReliable == nil || *e.IsReliable
+ newE.ConnectionResetInterval = e.ConnectionResetInterval
+ newE.BackoffFactor = e.BackoffFactor
+ newE.BackoffBase = e.BackoffBase
+ newE.BackoffMax = e.BackoffMax
+ newE.RecoveryInterval = e.RecoveryInterval
+ newE.RecoveryReset = e.RecoveryReset
+ newE.Version = e.Version
+ newE.TrackType = e.TrackType
+ newE.Protocol = e.Protocol
+ newE.Origin = e.Origin
+
+ if e.UseSSL != nil {
+ newE.useSSL = *e.UseSSL
+ } else {
+ newE.useSSL = main.useSSL
+ }
+ newEndpoints = append(newEndpoints, newE)
+ }
+ return newEndpoints
+}
+
+func loadHTTPAdditionalEndpoints(main Endpoint, l *LogsConfigKeys, intakeTrackType IntakeTrackType, intakeProtocol IntakeProtocol, intakeOrigin IntakeOrigin) []Endpoint {
+ additionals := l.getAdditionalEndpoints()
+
+ newEndpoints := make([]Endpoint, 0, len(additionals))
+ for _, e := range additionals {
+ newE := NewEndpoint(e.APIKey, e.Host, e.Port, false)
+
+ newE.UseCompression = main.UseCompression
+ newE.CompressionLevel = main.CompressionLevel
+ newE.ProxyAddress = e.ProxyAddress
+ newE.isReliable = e.IsReliable == nil || *e.IsReliable
+ newE.ConnectionResetInterval = e.ConnectionResetInterval
+ newE.BackoffFactor = main.BackoffFactor
+ newE.BackoffBase = main.BackoffBase
+ newE.BackoffMax = main.BackoffMax
+ newE.RecoveryInterval = main.RecoveryInterval
+ newE.RecoveryReset = main.RecoveryReset
+ newE.Version = e.Version
+ newE.TrackType = e.TrackType
+ newE.Protocol = e.Protocol
+ newE.Origin = e.Origin
+
+ if e.UseSSL != nil {
+ newE.useSSL = *e.UseSSL
+ } else {
+ newE.useSSL = main.useSSL
+ }
+
+ if newE.Version == 0 {
+ newE.Version = main.Version
+ }
+ if newE.Version == EPIntakeVersion2 {
+ newE.TrackType = intakeTrackType
+ newE.Protocol = intakeProtocol
+ newE.Origin = intakeOrigin
+ }
+
+ newEndpoints = append(newEndpoints, newE)
+ }
+ return newEndpoints
+}
+
+// GetAPIKey returns the latest API Key for the Endpoint, including when the configuration gets updated at runtime
+func (e *Endpoint) GetAPIKey() string {
+ return e.apiKeyGetter()
+}
+
+// UseSSL returns the useSSL config setting
+func (e *Endpoint) UseSSL() bool {
+ return e.useSSL
+}
+
+// GetStatus returns the endpoint status
+func (e *Endpoint) GetStatus(prefix string, useHTTP bool) string {
+ compression := "uncompressed"
+ if e.UseCompression {
+ compression = "compressed"
+ }
+
+ host := e.Host
+ port := e.Port
+
+ var protocol string
+ if useHTTP {
+ if e.UseSSL() {
+ protocol = "HTTPS"
+ if port == 0 {
+ port = 443 // use default port
+ }
+ } else {
+ protocol = "HTTP"
+ // this case technically can't happen: in order to
+ // disable SSL, the user has to use a custom URL and
+ // specify the port manually.
+ if port == 0 {
+ port = 80 // use default port
+ }
+ }
+ } else {
+ if e.UseSSL() {
+ protocol = "SSL encrypted TCP"
+ } else {
+ protocol = "TCP"
+ }
+ }
+
+ return fmt.Sprintf("%sSending %s logs in %s to %s on port %d", prefix, compression, protocol, host, port)
+}
+
+// IsReliable returns true if the endpoint is reliable. Endpoints are reliable by default.
+func (e *Endpoint) IsReliable() bool {
+ return e.isReliable
+}
+
+// Endpoints holds the main endpoint and additional ones to dualship logs.
+type Endpoints struct {
+ Main Endpoint
+ Endpoints []Endpoint
+ UseProto bool
+ UseHTTP bool
+ BatchWait time.Duration
+ BatchMaxConcurrentSend int
+ BatchMaxSize int
+ BatchMaxContentSize int
+ InputChanSize int
+}
+
+// GetStatus returns the endpoints status, one line per endpoint
+func (e *Endpoints) GetStatus() []string {
+ result := make([]string, 0)
+ for _, endpoint := range e.GetReliableEndpoints() {
+ result = append(result, endpoint.GetStatus("Reliable: ", e.UseHTTP))
+ }
+ for _, endpoint := range e.GetUnReliableEndpoints() {
+ result = append(result, endpoint.GetStatus("Unreliable: ", e.UseHTTP))
+ }
+ return result
+}
+
+// NewEndpoints returns a new endpoints composite with default batching settings
+func NewEndpoints(main Endpoint, additionalEndpoints []Endpoint, useProto bool, useHTTP bool) *Endpoints {
+ return NewEndpointsWithBatchSettings(
+ main,
+ additionalEndpoints,
+ useProto,
+ useHTTP,
+ pkgconfigsetup.DefaultBatchWait,
+ pkgconfigsetup.DefaultBatchMaxConcurrentSend,
+ pkgconfigsetup.DefaultBatchMaxSize,
+ pkgconfigsetup.DefaultBatchMaxContentSize,
+ pkgconfigsetup.DefaultInputChanSize,
+ )
+}
+
+// NewEndpointsWithBatchSettings returns a new endpoints composite with non-default batching settings specified
+func NewEndpointsWithBatchSettings(main Endpoint, additionalEndpoints []Endpoint, useProto bool, useHTTP bool, batchWait time.Duration, batchMaxConcurrentSend int, batchMaxSize int, batchMaxContentSize int, inputChanSize int) *Endpoints {
+ return &Endpoints{
+ Main: main,
+ Endpoints: append([]Endpoint{main}, additionalEndpoints...),
+ UseProto: useProto,
+ UseHTTP: useHTTP,
+ BatchWait: batchWait,
+ BatchMaxConcurrentSend: batchMaxConcurrentSend,
+ BatchMaxSize: batchMaxSize,
+ BatchMaxContentSize: batchMaxContentSize,
+ InputChanSize: inputChanSize,
+ }
+}
+
+// GetReliableEndpoints returns the endpoints that block the pipeline in the event of an outage and retry on
+// errors. Additional reliable endpoints are treated the same as the main endpoint.
+func (e *Endpoints) GetReliableEndpoints() []Endpoint {
+ endpoints := []Endpoint{}
+ for _, endpoint := range e.Endpoints {
+ if endpoint.IsReliable() {
+ endpoints = append(endpoints, endpoint)
+ }
+ }
+ return endpoints
+}
+
+// GetUnReliableEndpoints returns additional endpoints that do not guarantee logs are received in the event of an error.
+func (e *Endpoints) GetUnReliableEndpoints() []Endpoint {
+ endpoints := []Endpoint{}
+ for _, endpoint := range e.Endpoints {
+ if !endpoint.IsReliable() {
+ endpoints = append(endpoints, endpoint)
+ }
+ }
+ return endpoints
+}
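As a usage sketch of the exported constructors above (assuming the package is imported as logsconfig, with placeholder API keys and hosts):

package main

import (
	"fmt"

	logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config"
)

func main() {
	// Placeholder API keys and hosts; endpoints created this way are reliable by default.
	mainEP := logsconfig.NewEndpoint("<main-api-key>", "main-intake.example.com", 443, true)
	extra := logsconfig.NewEndpoint("<backup-api-key>", "backup-intake.example.com", 443, true)

	endpoints := logsconfig.NewEndpoints(mainEP, []logsconfig.Endpoint{extra}, false /* useProto */, true /* useHTTP */)

	// One status line per endpoint, e.g.
	// "Reliable: Sending uncompressed logs in HTTPS to main-intake.example.com on port 443".
	for _, line := range endpoints.GetStatus() {
		fmt.Println(line)
	}
}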
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/integration_config.go b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/integration_config.go
new file mode 100644
index 0000000000..83d7667896
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/integration_config.go
@@ -0,0 +1,306 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+ "sync"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+// Logs source types
+const (
+ TCPType = "tcp"
+ UDPType = "udp"
+ FileType = "file"
+ DockerType = "docker"
+ ContainerdType = "containerd"
+ JournaldType = "journald"
+ WindowsEventType = "windows_event"
+ StringChannelType = "string_channel"
+
+ // UTF16BE for UTF-16 Big Endian encoding
+ UTF16BE string = "utf-16-be"
+ // UTF16LE for UTF-16 Little Endian encoding
+ UTF16LE string = "utf-16-le"
+ // SHIFTJIS for Shift JIS (Japanese) encoding
+ SHIFTJIS string = "shift-jis"
+)
+
+// LogsConfig represents a log source config, which can be for instance
+// a file to tail or a port to listen to.
+type LogsConfig struct {
+ Type string
+
+ Port int // Network
+ IdleTimeout string `mapstructure:"idle_timeout" json:"idle_timeout"` // Network
+ Path string // File, Journald
+
+ Encoding string `mapstructure:"encoding" json:"encoding"` // File
+ ExcludePaths []string `mapstructure:"exclude_paths" json:"exclude_paths"` // File
+ TailingMode string `mapstructure:"start_position" json:"start_position"` // File
+
+ //nolint:revive // TODO(AML) Fix revive linter
+ ConfigId string `mapstructure:"config_id" json:"config_id"` // Journald
+ IncludeSystemUnits []string `mapstructure:"include_units" json:"include_units"` // Journald
+ ExcludeSystemUnits []string `mapstructure:"exclude_units" json:"exclude_units"` // Journald
+ IncludeUserUnits []string `mapstructure:"include_user_units" json:"include_user_units"` // Journald
+ ExcludeUserUnits []string `mapstructure:"exclude_user_units" json:"exclude_user_units"` // Journald
+ IncludeMatches []string `mapstructure:"include_matches" json:"include_matches"` // Journald
+ ExcludeMatches []string `mapstructure:"exclude_matches" json:"exclude_matches"` // Journald
+ ContainerMode bool `mapstructure:"container_mode" json:"container_mode"` // Journald
+
+ Image string // Docker
+ Label string // Docker
+ // Name contains the container name
+ Name string // Docker
+ // Identifier contains the container ID. This is also set for File sources and used to
+ // determine the appropriate tags for the logs.
+ Identifier string // Docker, File
+
+ ChannelPath string `mapstructure:"channel_path" json:"channel_path"` // Windows Event
+ Query string // Windows Event
+
+ // Channel is used as input only by the Channel tailer.
+ // It could have been unidirectional, but then the tailer could not close it.
+ Channel chan *ChannelMessage
+
+ // ChannelTags are the tags attached to messages on Channel; unlike Tags this can be
+ // modified at runtime (as long as ChannelTagsMutex is held).
+ ChannelTags []string
+
+ // ChannelTagsMutex guards ChannelTags.
+ ChannelTagsMutex sync.Mutex
+
+ Service string
+ Source string
+ SourceCategory string
+ Tags []string
+ ProcessingRules []*ProcessingRule `mapstructure:"log_processing_rules" json:"log_processing_rules"`
+ // ProcessRawMessage is used to process the raw message instead of only the content part of the message.
+ ProcessRawMessage *bool `mapstructure:"process_raw_message" json:"process_raw_message"`
+
+ AutoMultiLine *bool `mapstructure:"auto_multi_line_detection" json:"auto_multi_line_detection"`
+ AutoMultiLineSampleSize int `mapstructure:"auto_multi_line_sample_size" json:"auto_multi_line_sample_size"`
+ AutoMultiLineMatchThreshold float64 `mapstructure:"auto_multi_line_match_threshold" json:"auto_multi_line_match_threshold"`
+}
+
+// Dump dumps the contents of this struct to a string, for debugging purposes.
+func (c *LogsConfig) Dump(multiline bool) string {
+ if c == nil {
+ return "&LogsConfig(nil)"
+ }
+
+ var b strings.Builder
+ ws := func(fmt string) string {
+ if multiline {
+ return "\n\t" + fmt
+ }
+ return " " + fmt
+ }
+
+ fmt.Fprint(&b, ws("&LogsConfig{"))
+ fmt.Fprintf(&b, ws("Type: %#v,"), c.Type)
+ switch c.Type {
+ case TCPType:
+ fmt.Fprintf(&b, ws("Port: %d,"), c.Port)
+ fmt.Fprintf(&b, ws("IdleTimeout: %#v,"), c.IdleTimeout)
+ case UDPType:
+ fmt.Fprintf(&b, ws("Port: %d,"), c.Port)
+ fmt.Fprintf(&b, ws("IdleTimeout: %#v,"), c.IdleTimeout)
+ case FileType:
+ fmt.Fprintf(&b, ws("Path: %#v,"), c.Path)
+ fmt.Fprintf(&b, ws("Encoding: %#v,"), c.Encoding)
+ fmt.Fprintf(&b, ws("Identifier: %#v,"), c.Identifier)
+ fmt.Fprintf(&b, ws("ExcludePaths: %#v,"), c.ExcludePaths)
+ fmt.Fprintf(&b, ws("TailingMode: %#v,"), c.TailingMode)
+ case DockerType, ContainerdType:
+ fmt.Fprintf(&b, ws("Image: %#v,"), c.Image)
+ fmt.Fprintf(&b, ws("Label: %#v,"), c.Label)
+ fmt.Fprintf(&b, ws("Name: %#v,"), c.Name)
+ fmt.Fprintf(&b, ws("Identifier: %#v,"), c.Identifier)
+ case JournaldType:
+ fmt.Fprintf(&b, ws("Path: %#v,"), c.Path)
+ fmt.Fprintf(&b, ws("IncludeSystemUnits: %#v,"), c.IncludeSystemUnits)
+ fmt.Fprintf(&b, ws("ExcludeSystemUnits: %#v,"), c.ExcludeSystemUnits)
+ fmt.Fprintf(&b, ws("IncludeUserUnits: %#v,"), c.IncludeUserUnits)
+ fmt.Fprintf(&b, ws("ExcludeUserUnits: %#v,"), c.ExcludeUserUnits)
+ fmt.Fprintf(&b, ws("ContainerMode: %t,"), c.ContainerMode)
+ case WindowsEventType:
+ fmt.Fprintf(&b, ws("ChannelPath: %#v,"), c.ChannelPath)
+ fmt.Fprintf(&b, ws("Query: %#v,"), c.Query)
+ case StringChannelType:
+ fmt.Fprintf(&b, ws("Channel: %p,"), c.Channel)
+ c.ChannelTagsMutex.Lock()
+ fmt.Fprintf(&b, ws("ChannelTags: %#v,"), c.ChannelTags)
+ c.ChannelTagsMutex.Unlock()
+ }
+ fmt.Fprintf(&b, ws("Service: %#v,"), c.Service)
+ fmt.Fprintf(&b, ws("Source: %#v,"), c.Source)
+ fmt.Fprintf(&b, ws("SourceCategory: %#v,"), c.SourceCategory)
+ fmt.Fprintf(&b, ws("Tags: %#v,"), c.Tags)
+ fmt.Fprintf(&b, ws("ProcessingRules: %#v,"), c.ProcessingRules)
+ if c.ProcessRawMessage != nil {
+ fmt.Fprintf(&b, ws("ProcessRawMessage: %t,"), *c.ProcessRawMessage)
+ } else {
+ fmt.Fprint(&b, ws("ProcessRawMessage: nil,"))
+ }
+ fmt.Fprintf(&b, ws("ShouldProcessRawMessage(): %#v,"), c.ShouldProcessRawMessage())
+ if c.AutoMultiLine != nil {
+ fmt.Fprintf(&b, ws("AutoMultiLine: %t,"), *c.AutoMultiLine)
+ } else {
+ fmt.Fprint(&b, ws("AutoMultiLine: nil,"))
+ }
+ fmt.Fprintf(&b, ws("AutoMultiLineSampleSize: %d,"), c.AutoMultiLineSampleSize)
+ fmt.Fprintf(&b, ws("AutoMultiLineMatchThreshold: %f}"), c.AutoMultiLineMatchThreshold)
+ return b.String()
+}
+
+// PublicJSON serializes the structure to make sure we only export fields that can be relevant to customers.
+// This is used to send the logs config to the backend as part of the metadata payload.
+func (c *LogsConfig) PublicJSON() ([]byte, error) {
+ // Export only fields that are explicitly documented in the public documentation
+ return json.Marshal(&struct {
+ Type string `json:"type,omitempty"`
+ Port int `json:"port,omitempty"` // Network
+ Path string `json:"path,omitempty"` // File, Journald
+ Encoding string `json:"encoding,omitempty"` // File
+ ExcludePaths []string `json:"exclude_paths,omitempty"` // File
+ TailingMode string `json:"start_position,omitempty"` // File
+ ChannelPath string `json:"channel_path,omitempty"` // Windows Event
+ Service string `json:"service,omitempty"`
+ Source string `json:"source,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ ProcessingRules []*ProcessingRule `json:"log_processing_rules,omitempty"`
+ AutoMultiLine *bool `json:"auto_multi_line_detection,omitempty"`
+ }{
+ Type: c.Type,
+ Port: c.Port,
+ Path: c.Path,
+ Encoding: c.Encoding,
+ ExcludePaths: c.ExcludePaths,
+ TailingMode: c.TailingMode,
+ ChannelPath: c.ChannelPath,
+ Service: c.Service,
+ Source: c.Source,
+ Tags: c.Tags,
+ ProcessingRules: c.ProcessingRules,
+ AutoMultiLine: c.AutoMultiLine,
+ })
+}
+
+// TailingMode type
+type TailingMode uint8
+
+// Tailing Modes
+const (
+ ForceBeginning = iota
+ ForceEnd
+ Beginning
+ End
+)
+
+var tailingModeTuples = []struct {
+ s string
+ m TailingMode
+}{
+ {"forceBeginning", ForceBeginning},
+ {"forceEnd", ForceEnd},
+ {"beginning", Beginning},
+ {"end", End},
+}
+
+// TailingModeFromString parses a string and returns the corresponding tailing mode, defaulting to End if not found
+func TailingModeFromString(mode string) (TailingMode, bool) {
+ for _, t := range tailingModeTuples {
+ if t.s == mode {
+ return t.m, true
+ }
+ }
+ return End, false
+}
+
+// String returns the string representation of a tailing mode. It returns "" for an invalid tailing mode.
+func (mode TailingMode) String() string {
+ for _, t := range tailingModeTuples {
+ if t.m == mode {
+ return t.s
+ }
+ }
+ return ""
+}
+
+// Validate returns an error if the config is misconfigured
+func (c *LogsConfig) Validate() error {
+ switch {
+ case c.Type == "":
+ // users don't have to specify a logs-config type when defining
+ // an autodiscovery label since we override it at some point;
+ // this check is mostly a sanity check to detect an override miss.
+ return fmt.Errorf("a config must have a type")
+ case c.Type == FileType:
+ if c.Path == "" {
+ return fmt.Errorf("file source must have a path")
+ }
+ err := c.validateTailingMode()
+ if err != nil {
+ return err
+ }
+ case c.Type == TCPType && c.Port == 0:
+ return fmt.Errorf("tcp source must have a port")
+ case c.Type == UDPType && c.Port == 0:
+ return fmt.Errorf("udp source must have a port")
+ }
+ err := ValidateProcessingRules(c.ProcessingRules)
+ if err != nil {
+ return err
+ }
+ return CompileProcessingRules(c.ProcessingRules)
+}
+
+func (c *LogsConfig) validateTailingMode() error {
+ mode, found := TailingModeFromString(c.TailingMode)
+ if !found && c.TailingMode != "" {
+ return fmt.Errorf("invalid tailing mode '%v' for %v", c.TailingMode, c.Path)
+ }
+ if ContainsWildcard(c.Path) && (mode == Beginning || mode == ForceBeginning) {
+ return fmt.Errorf("tailing from the beginning is not supported for wildcard path %v", c.Path)
+ }
+ return nil
+}
+
+// AutoMultiLineEnabled determines whether auto multi line detection is enabled for this config,
+// considering both the agent-wide logs_config.auto_multi_line_detection and any config for this
+// particular log source.
+func (c *LogsConfig) AutoMultiLineEnabled(coreConfig pkgconfigmodel.Reader) bool {
+ if c.AutoMultiLine != nil {
+ return *c.AutoMultiLine
+ }
+ return coreConfig.GetBool("logs_config.auto_multi_line_detection")
+}
+
+// ShouldProcessRawMessage returns whether the raw message should be processed instead
+// of only the message content.
+// This is tightly linked to how messages are transmitted through the pipeline.
+// If returning true, tailers using structured message (journald, windowsevents)
+// will fall back to original behavior of sending the whole message (e.g. JSON
+// for journald) for post-processing.
+// Otherwise, the message content is extracted from the structured message and
+// only this part is post-processed and sent to the intake.
+func (c *LogsConfig) ShouldProcessRawMessage() bool {
+ if c.ProcessRawMessage != nil {
+ return *c.ProcessRawMessage
+ }
+ return true // default behaviour when nothing's been configured
+}
+
+// ContainsWildcard returns true if the path contains any wildcard character
+func ContainsWildcard(path string) bool {
+ return strings.ContainsAny(path, "*?[")
+}
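A short sketch of how a file source config defined above is validated and dumped; the import alias, path, and rule values are illustrative:

package main

import (
	"fmt"

	logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config"
)

func main() {
	cfg := &logsconfig.LogsConfig{
		Type:    logsconfig.FileType,
		Path:    "/var/log/app/*.log",
		Service: "app",
		Source:  "go",
		ProcessingRules: []*logsconfig.ProcessingRule{
			{Type: logsconfig.MaskSequences, Name: "mask_api_keys", ReplacePlaceholder: "[REDACTED]", Pattern: `api_key=\w+`},
		},
	}

	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	fmt.Println(cfg.Dump(false))
}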
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/messages.go b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/messages.go
new file mode 100644
index 0000000000..ddf03e0cb3
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/messages.go
@@ -0,0 +1,51 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import (
+ "sync"
+)
+
+// Messages holds messages and warnings that can be displayed in the status.
+// Warnings are displayed at the top of the log section in the status, and
+// messages are displayed in the log source that generated the message.
+type Messages struct {
+ messages map[string]string
+ lock *sync.Mutex
+}
+
+// NewMessages initializes Messages with the default values
+func NewMessages() *Messages {
+ return &Messages{
+ messages: make(map[string]string),
+ lock: &sync.Mutex{},
+ }
+}
+
+// AddMessage adds a message for the given key
+func (m *Messages) AddMessage(key string, message string) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.messages[key] = message
+}
+
+// GetMessages returns all the messages
+func (m *Messages) GetMessages() []string {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ messages := make([]string, 0)
+ for _, message := range m.messages {
+ messages = append(messages, message)
+ }
+ return messages
+}
+
+// RemoveMessage removes a message
+func (m *Messages) RemoveMessage(key string) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ delete(m.messages, key)
+}
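Messages is a small thread-safe key/value store; a brief usage sketch (the key and message text are illustrative):

package main

import (
	"fmt"

	logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config"
)

func main() {
	msgs := logsconfig.NewMessages()
	msgs.AddMessage("file:/var/log/app.log", "3 files tailed out of 3 files matching")
	fmt.Println(msgs.GetMessages())

	msgs.RemoveMessage("file:/var/log/app.log")
	fmt.Println(len(msgs.GetMessages())) // 0
}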
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/parser.go b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/parser.go
new file mode 100644
index 0000000000..9330ca5f8b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/parser.go
@@ -0,0 +1,46 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/DataDog/viper"
+)
+
+// ParseJSON parses data formatted in JSON,
+// returning an error if the parsing failed.
+func ParseJSON(data []byte) ([]*LogsConfig, error) {
+ var configs []*LogsConfig
+ err := json.Unmarshal(data, &configs)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse JSON logs config: %v", err)
+ }
+ return configs, nil
+}
+
+const yaml = "yaml"
+const logsPath = "logs"
+
+// ParseYAML parses data formatted in YAML,
+// returning an error if the parsing failed.
+func ParseYAML(data []byte) ([]*LogsConfig, error) {
+ var configs []*LogsConfig
+ var err error
+ v := viper.New()
+ v.SetConfigType(yaml)
+ err = v.ReadConfig(bytes.NewBuffer(data))
+ if err != nil {
+ return nil, fmt.Errorf("could not decode YAML logs config: %v", err)
+ }
+ err = v.UnmarshalKey(logsPath, &configs)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse YAML logs config: %v", err)
+ }
+ return configs, nil
+}
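ParseYAML expects the log configs under a top-level "logs" key; a sketch with an illustrative snippet:

package main

import (
	"fmt"

	logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config"
)

func main() {
	raw := []byte(`
logs:
  - type: file
    path: /var/log/app/app.log
    service: app
    source: go
`)
	configs, err := logsconfig.ParseYAML(raw)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, c := range configs {
		fmt.Println(c.Dump(false))
	}
}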
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/processing_rules.go b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/processing_rules.go
new file mode 100644
index 0000000000..0a5d946a79
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/logs/agent/config/processing_rules.go
@@ -0,0 +1,85 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package config
+
+import (
+ "fmt"
+ "regexp"
+)
+
+// Processing rule types
+const (
+ ExcludeAtMatch = "exclude_at_match"
+ IncludeAtMatch = "include_at_match"
+ MaskSequences = "mask_sequences"
+ MultiLine = "multi_line"
+)
+
+// ProcessingRule defines an exclusion or a masking rule to
+// be applied on log lines
+type ProcessingRule struct {
+ Type string
+ Name string
+ ReplacePlaceholder string `mapstructure:"replace_placeholder" json:"replace_placeholder"`
+ Pattern string
+ // TODO: should be moved out
+ Regex *regexp.Regexp
+ Placeholder []byte
+}
+
+// ValidateProcessingRules validates the rules and returns an error if one is misconfigured.
+// Each processing rule must have:
+// - a valid name
+// - a valid type
+// - a valid pattern that compiles
+func ValidateProcessingRules(rules []*ProcessingRule) error {
+ for _, rule := range rules {
+ if rule.Name == "" {
+ return fmt.Errorf("all processing rules must have a name")
+ }
+
+ switch rule.Type {
+ case ExcludeAtMatch, IncludeAtMatch, MaskSequences, MultiLine:
+ break
+ case "":
+ return fmt.Errorf("type must be set for processing rule `%s`", rule.Name)
+ default:
+ return fmt.Errorf("type %s is not supported for processing rule `%s`", rule.Type, rule.Name)
+ }
+
+ if rule.Pattern == "" {
+ return fmt.Errorf("no pattern provided for processing rule: %s", rule.Name)
+ }
+ _, err := regexp.Compile(rule.Pattern)
+ if err != nil {
+ return fmt.Errorf("invalid pattern %s for processing rule: %s", rule.Pattern, rule.Name)
+ }
+ }
+ return nil
+}
+
+// CompileProcessingRules compiles all processing rule regular expressions.
+func CompileProcessingRules(rules []*ProcessingRule) error {
+ for _, rule := range rules {
+ re, err := regexp.Compile(rule.Pattern)
+ if err != nil {
+ return err
+ }
+ switch rule.Type {
+ case ExcludeAtMatch, IncludeAtMatch:
+ rule.Regex = re
+ case MaskSequences:
+ rule.Regex = re
+ rule.Placeholder = []byte(rule.ReplacePlaceholder)
+ case MultiLine:
+ rule.Regex, err = regexp.Compile("^" + rule.Pattern)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
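A sketch of the validate/compile/apply flow for a mask_sequences rule; the rule name, pattern, and sample line are illustrative:

package main

import (
	"fmt"

	logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config"
)

func main() {
	rules := []*logsconfig.ProcessingRule{
		{Type: logsconfig.MaskSequences, Name: "mask_credit_cards", ReplacePlaceholder: "[masked]", Pattern: `\d{4}(-\d{4}){3}`},
	}

	if err := logsconfig.ValidateProcessingRules(rules); err != nil {
		fmt.Println("invalid rules:", err)
		return
	}
	if err := logsconfig.CompileProcessingRules(rules); err != nil {
		fmt.Println("compile error:", err)
		return
	}

	line := []byte("payment with card 1234-5678-9012-3456 accepted")
	fmt.Println(string(rules[0].Regex.ReplaceAll(line, rules[0].Placeholder)))
}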
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/component.go b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/component.go
new file mode 100644
index 0000000000..b6cdd56a6d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/component.go
@@ -0,0 +1,32 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package logsagentpipeline contains logs agent pipeline component
+package logsagentpipeline
+
+import (
+ "context"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/pipeline"
+)
+
+// team: opentelemetry
+
+// Component is the component type.
+type Component interface {
+ // GetPipelineProvider gets the pipeline provider
+ GetPipelineProvider() pipeline.Provider
+}
+
+// LogsAgent is a compat version of the component for non-fx usage
+type LogsAgent interface {
+ Component
+
+ // Start sets up the logs agent and starts its pipelines
+ Start(context.Context) error
+
+ // Stop stops the logs agent and all elements of the data pipeline
+ Stop(context.Context) error
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go
new file mode 100644
index 0000000000..9ad68b1537
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go
@@ -0,0 +1,228 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+package logsagentpipelineimpl
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ configComponent "github.com/DataDog/datadog-agent/comp/core/config"
+ "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface"
+ logComponent "github.com/DataDog/datadog-agent/comp/core/log"
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline"
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/logs/auditor"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
+ "github.com/DataDog/datadog-agent/pkg/logs/client/http"
+ "github.com/DataDog/datadog-agent/pkg/logs/diagnostic"
+ "github.com/DataDog/datadog-agent/pkg/logs/pipeline"
+ "github.com/DataDog/datadog-agent/pkg/status/health"
+ "github.com/DataDog/datadog-agent/pkg/util/optional"
+ "github.com/DataDog/datadog-agent/pkg/util/startstop"
+
+ "go.uber.org/fx"
+ "go.uber.org/zap"
+)
+
+const (
+ intakeTrackType = "logs"
+
+ // Log messages
+ multiLineWarning = "multi_line processing rules are not supported as global processing rules."
+)
+
+// Dependencies specifies the list of dependencies needed to initialize the logs agent
+type Dependencies struct {
+ fx.In
+
+ Lc fx.Lifecycle
+ Log logComponent.Component
+ Config configComponent.Component
+ Hostname hostnameinterface.Component
+}
+
+// Agent represents the data pipeline that collects, decodes, processes and sends logs to the backend.
+type Agent struct {
+ log logComponent.Component
+ config pkgconfigmodel.Reader
+ hostname hostnameinterface.Component
+
+ endpoints *config.Endpoints
+ auditor auditor.Auditor
+ destinationsCtx *client.DestinationsContext
+ pipelineProvider pipeline.Provider
+ health *health.Handle
+}
+
+// NewLogsAgentComponent returns a new instance of Agent as a Component
+func NewLogsAgentComponent(deps Dependencies) optional.Option[logsagentpipeline.Component] {
+ logsAgent := NewLogsAgent(deps)
+ if logsAgent == nil {
+ return optional.NewNoneOption[logsagentpipeline.Component]()
+ }
+ return optional.NewOption[logsagentpipeline.Component](logsAgent)
+}
+
+// NewLogsAgent returns a new instance of Agent with the given dependencies
+func NewLogsAgent(deps Dependencies) logsagentpipeline.LogsAgent {
+ if deps.Config.GetBool("logs_enabled") || deps.Config.GetBool("log_enabled") {
+ if deps.Config.GetBool("log_enabled") {
+ deps.Log.Warn(`"log_enabled" is deprecated, use "logs_enabled" instead`)
+ }
+
+ logsAgent := &Agent{
+ log: deps.Log,
+ config: deps.Config,
+ hostname: deps.Hostname,
+ }
+ if deps.Lc != nil {
+ deps.Lc.Append(fx.Hook{
+ OnStart: logsAgent.Start,
+ OnStop: logsAgent.Stop,
+ })
+ }
+
+ return logsAgent
+ }
+
+ deps.Log.Debug("logs-agent disabled")
+ return nil
+}
+
+// Start sets up the logs agent and starts its pipelines
+func (a *Agent) Start(context.Context) error {
+ a.log.Debug("Starting logs-agent...")
+
+ // setup the server config
+ endpoints, err := buildEndpoints(a.config)
+
+ if err != nil {
+ message := fmt.Sprintf("Invalid endpoints: %v", err)
+ return errors.New(message)
+ }
+
+ a.endpoints = endpoints
+
+ err = a.setupAgent()
+
+ if err != nil {
+ a.log.Error("Could not start logs-agent: ", zap.Error(err))
+ return err
+ }
+
+ a.startPipeline()
+ a.log.Debug("logs-agent started")
+
+ return nil
+}
+
+func (a *Agent) setupAgent() error {
+ // setup global processing rules
+ processingRules, err := config.GlobalProcessingRules(a.config)
+ if err != nil {
+ message := fmt.Sprintf("Invalid processing rules: %v", err)
+ return errors.New(message)
+ }
+
+ if config.HasMultiLineRule(processingRules) {
+ a.log.Warn(multiLineWarning)
+ }
+
+ a.SetupPipeline(processingRules)
+ return nil
+}
+
+// startPipeline starts all the elements of the data pipeline in the right order to prevent data loss
+func (a *Agent) startPipeline() {
+ starter := startstop.NewStarter(
+ a.destinationsCtx,
+ a.auditor,
+ a.pipelineProvider,
+ )
+ starter.Start()
+}
+
+// Stop stops the logs agent and all elements of the data pipeline
+func (a *Agent) Stop(context.Context) error {
+ a.log.Debug("Stopping logs-agent")
+
+ stopper := startstop.NewSerialStopper(
+ a.pipelineProvider,
+ a.auditor,
+ a.destinationsCtx,
+ )
+
+ // This will try to stop everything in order, including the potentially blocking
+ // parts like the sender. After StopTimeout it will just stop the last part of the
+ // pipeline, disconnecting it from the auditor, to make sure that the pipeline is
+ // flushed before stopping.
+ // TODO: Add this feature in the stopper.
+ c := make(chan struct{})
+ go func() {
+ stopper.Stop()
+ close(c)
+ }()
+ timeout := time.Duration(a.config.GetInt("logs_config.stop_grace_period")) * time.Second
+ select {
+ case <-c:
+ case <-time.After(timeout):
+ a.log.Debug("Timed out when stopping logs-agent, forcing it to stop now")
+ // We force all destinations to read/flush all the messages they get without
+ // trying to write to the network.
+ a.destinationsCtx.Stop()
+ // Wait again for the stopper to complete.
+ // In some situations the stopper unfortunately never succeeds in completing;
+ // we have already reached the grace period, so give it a few more seconds and
+ // then force quit.
+ timeout := time.NewTimer(5 * time.Second)
+ select {
+ case <-c:
+ case <-timeout.C:
+ a.log.Warn("Force close of the Logs Agent.")
+ }
+ }
+ a.log.Debug("logs-agent stopped")
+ return nil
+}
+
+// GetPipelineProvider gets the pipeline provider
+func (a *Agent) GetPipelineProvider() pipeline.Provider {
+ return a.pipelineProvider
+}
+
+// SetupPipeline initializes the logs agent pipeline and its dependencies
+func (a *Agent) SetupPipeline(
+ processingRules []*config.ProcessingRule,
+) {
+ health := health.RegisterLiveness("logs-agent")
+
+ // setup the auditor
+ // We pass the health handle to the auditor because it's the end of the pipeline and the most
+ // critical part. Arguably it could also be plugged to the destination.
+ auditorTTL := time.Duration(a.config.GetInt("logs_config.auditor_ttl")) * time.Hour
+ auditor := auditor.New(a.config.GetString("logs_config.run_path"), auditor.DefaultRegistryFilename, auditorTTL, health)
+ destinationsCtx := client.NewDestinationsContext()
+
+ // setup the pipeline provider that provides pairs of processor and sender
+ pipelineProvider := pipeline.NewProvider(config.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config)
+
+ a.auditor = auditor
+ a.destinationsCtx = destinationsCtx
+ a.pipelineProvider = pipelineProvider
+ a.health = health
+}
+
+// buildEndpoints builds endpoints for the logs agent
+func buildEndpoints(coreConfig pkgconfigmodel.Reader) (*config.Endpoints, error) {
+ httpConnectivity := config.HTTPConnectivityFailure
+ if endpoints, err := config.BuildHTTPEndpoints(coreConfig, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin); err == nil {
+ httpConnectivity = http.CheckConnectivity(endpoints.Main, coreConfig)
+ }
+ return config.BuildEndpoints(coreConfig, httpConnectivity, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin)
+}
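A minimal usage sketch (assumptions, not part of this diff): outside of fx the agent can be built with a nil lifecycle and driven manually. The mock component values and the NextPipelineChan call are assumed stand-ins for the Datadog comp/pkg/logs APIs.

    // Sketch only: logMock, configMock and hostnameMock are assumed implementations of the
    // logComponent, configComponent and hostnameinterface components.
    deps := logsagentpipelineimpl.Dependencies{
        Lc:       nil, // no fx lifecycle: Start/Stop are invoked directly below
        Log:      logMock,
        Config:   configMock, // must report logs_enabled=true, otherwise NewLogsAgent returns nil
        Hostname: hostnameMock,
    }
    agent := logsagentpipelineimpl.NewLogsAgent(deps)
    if agent == nil {
        return // logs collection disabled in the config
    }
    if err := agent.Start(context.Background()); err != nil {
        panic(err)
    }
    defer agent.Stop(context.Background())
    // NextPipelineChan is assumed from pipeline.Provider; converted OTLP logs are written to it.
    logsChan := agent.GetPipelineProvider().NextPipelineChan()
    _ = logsChan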
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/logsagentpipeline.go b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/logsagentpipeline.go
new file mode 100644
index 0000000000..457ca39e48
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/logsagentpipeline.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package logsagentpipelineimpl contains the implementation for the logs agent pipeline component
+package logsagentpipelineimpl
+
+import (
+ "github.com/DataDog/datadog-agent/pkg/util/fxutil"
+
+ "go.uber.org/fx"
+)
+
+// Module defines the fx options for this component.
+func Module() fxutil.Module {
+ return fxutil.Component(
+ fx.Provide(NewLogsAgentComponent))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/statusimpl.go b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/statusimpl.go
new file mode 100644
index 0000000000..69a280f5f7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/statusimpl.go
@@ -0,0 +1,29 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+package logsagentpipelineimpl
+
+import (
+ "github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
+)
+
+// StatusProvider is the type for logs agent status methods
+type StatusProvider struct{}
+
+var _ statusinterface.Status = (*StatusProvider)(nil)
+
+// AddGlobalWarning keeps track of a warning message to display on the status.
+func (p StatusProvider) AddGlobalWarning(string, string) {
+}
+
+// RemoveGlobalWarning loses track of a warning message
+// that does not need to be displayed on the status anymore.
+func (p StatusProvider) RemoveGlobalWarning(string) {
+}
+
+// NewStatusProvider returns a no-op status implementation for the logs agent pipeline
+func NewStatusProvider() statusinterface.Status {
+ return &StatusProvider{}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go
new file mode 100644
index 0000000000..5f0d6717e8
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter/factory.go
@@ -0,0 +1,102 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021-present Datadog, Inc.
+
+// Package logsagentexporter contains a logs exporter which forwards logs to a channel.
+package logsagentexporter
+
+import (
+ "context"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/sources"
+
+ "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
+ "go.opentelemetry.io/collector/component"
+ exp "go.opentelemetry.io/collector/exporter"
+ "go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+const (
+ // TypeStr defines the logsagent exporter type string.
+ TypeStr = "logsagent"
+ stability = component.StabilityLevelStable
+ // logSourceName specifies the Datadog source tag value to be added to logs sent by the logs agent exporter.
+ logSourceName = "OTLP log ingestion"
+ // otelSource specifies a source to be added to all logs sent by the logs agent exporter. The tag has key `otel_source` and the value specified by this constant.
+ otelSource = "datadog_agent"
+)
+
+// Config defines configuration for the logs agent exporter.
+type Config struct {
+ OtelSource string
+ LogSourceName string
+}
+
+type factory struct {
+ logsAgentChannel chan *message.Message
+}
+
+// NewFactory creates a new logsagentexporter factory.
+func NewFactory(logsAgentChannel chan *message.Message) exp.Factory {
+ f := &factory{logsAgentChannel: logsAgentChannel}
+ cfgType, _ := component.NewType(TypeStr)
+
+ return exp.NewFactory(
+ cfgType,
+ func() component.Config {
+ return &Config{
+ OtelSource: otelSource,
+ LogSourceName: logSourceName,
+ }
+ },
+ exp.WithLogs(f.createLogsExporter, stability),
+ )
+}
+
+func (f *factory) createLogsExporter(
+ ctx context.Context,
+ set exp.CreateSettings,
+ c component.Config,
+) (exp.Logs, error) {
+ cfg := checkAndCastConfig(c)
+ logSource := sources.NewLogSource(cfg.LogSourceName, &config.LogsConfig{})
+
+ // TODO: Ideally the attributes translator would be created once and reused
+ // across all signals. This would need unifying the logsagent and serializer
+ // exporters into a single exporter.
+ attributesTranslator, err := attributes.NewTranslator(set.TelemetrySettings)
+ if err != nil {
+ return nil, err
+ }
+
+ exporter, err := newExporter(set.TelemetrySettings, cfg, logSource, f.logsAgentChannel, attributesTranslator)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ // cancel() runs on shutdown
+ return exporterhelper.NewLogsExporter(
+ ctx,
+ set,
+ c,
+ exporter.ConsumeLogs,
+ exporterhelper.WithShutdown(func(context.Context) error {
+ cancel()
+ return nil
+ }),
+ )
+}
+
+// checkAndCastConfig checks the configuration type and its warnings, and casts it to
+// the logs agent exporter Config struct.
+func checkAndCastConfig(c component.Config) *Config {
+ cfg, ok := c.(*Config)
+ if !ok {
+ panic("programming error: config structure is not of type *logsagentexporter.Config")
+ }
+ return cfg
+}
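A usage sketch for the factory (the CreateSettings value and the test helper named below are assumptions, not part of this diff): the channel handed to NewFactory is the same one the logs agent pipeline reads from, and the exporter is then built through the regular collector factory API.

    // Sketch only: set is an assumed exporter.CreateSettings value,
    // e.g. exportertest.NewNopCreateSettings() in tests.
    logsAgentChannel := make(chan *message.Message, 100)
    factory := logsagentexporter.NewFactory(logsAgentChannel)
    cfg := factory.CreateDefaultConfig() // default otel_source and log source name
    logsExp, err := factory.CreateLogsExporter(context.Background(), set, cfg)
    if err != nil {
        panic(err)
    }
    _ = logsExp // wire into a collector pipeline, or call ConsumeLogs directly in tests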
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go
new file mode 100644
index 0000000000..3f3ffd3050
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter.go
@@ -0,0 +1,95 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021-present Datadog, Inc.
+
+package logsagentexporter
+
+import (
+ "context"
+ "errors"
+ "strings"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/sources"
+ "github.com/DataDog/datadog-agent/pkg/util/scrubber"
+
+ "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
+ logsmapping "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs"
+ "github.com/stormcat24/protodep/pkg/logger"
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/plog"
+)
+
+type exporter struct {
+ set component.TelemetrySettings
+ logsAgentChannel chan *message.Message
+ logSource *sources.LogSource
+ translator *logsmapping.Translator
+}
+
+func newExporter(
+ set component.TelemetrySettings,
+ cfg *Config,
+ logSource *sources.LogSource,
+ logsAgentChannel chan *message.Message,
+ attributesTranslator *attributes.Translator,
+) (*exporter, error) {
+ translator, err := logsmapping.NewTranslator(set, attributesTranslator, cfg.OtelSource)
+ if err != nil {
+ return nil, err
+ }
+
+ return &exporter{
+ set: set,
+ logsAgentChannel: logsAgentChannel,
+ logSource: logSource,
+ translator: translator,
+ }, nil
+}
+
+func (e *exporter) ConsumeLogs(ctx context.Context, ld plog.Logs) (err error) {
+ defer func() {
+ if err != nil {
+ newErr, scrubbingErr := scrubber.ScrubString(err.Error())
+ if scrubbingErr != nil {
+ err = scrubbingErr
+ } else {
+ err = errors.New(newErr)
+ }
+ }
+ }()
+
+ payloads := e.translator.MapLogs(ctx, ld)
+ for _, ddLog := range payloads {
+ tags := strings.Split(ddLog.GetDdtags(), ",")
+ // Tags are set in the message origin instead
+ ddLog.Ddtags = nil
+ service := ""
+ if ddLog.Service != nil {
+ service = *ddLog.Service
+ }
+ status := ddLog.AdditionalProperties["status"]
+ if status == "" {
+ status = message.StatusInfo
+ }
+ origin := message.NewOrigin(e.logSource)
+ origin.SetTags(tags)
+ origin.SetService(service)
+ origin.SetSource(e.logSource.Name)
+
+ content, err := ddLog.MarshalJSON()
+ if err != nil {
+ logger.Error("Error parsing log: " + err.Error())
+ }
+
+ // ingestionTs is an internal field used for latency tracking on the status page, not the actual log timestamp.
+ ingestionTs := time.Now().UnixNano()
+ message := message.NewMessage(content, origin, status, ingestionTs)
+
+ e.logsAgentChannel <- message
+ }
+
+ return nil
+}
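The exporter only pushes converted messages onto the channel; something downstream has to drain it. A minimal consumer sketch (the GetContent accessor is assumed from pkg/logs/message):

    // Sketch only: in the real wiring the logs agent pipeline consumes this channel.
    go func() {
        for msg := range logsAgentChannel {
            fmt.Println(string(msg.GetContent())) // assumed accessor; the payload is the JSON-encoded Datadog log
        }
    }()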
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient/LICENSE b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient/doc.go b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient/doc.go
new file mode 100644
index 0000000000..f0856c04d0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient/doc.go
@@ -0,0 +1,7 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+// Package metricsclient provides a statsd.ClientInterface implementation to datadog exporter and datadog connector
+package metricsclient
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/metrics_client.go b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient/metrics_client.go
similarity index 77%
rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/metrics_client.go
rename to vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient/metrics_client.go
index 025001ce90..6a83b4e8ad 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/metrics_client.go
+++ b/vendor/github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient/metrics_client.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package datadog // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog"
+package metricsclient
import (
"context"
@@ -15,7 +15,9 @@ import (
)
const (
- ExporterSourceTag = "datadogexporter"
+ // ExporterSourceTag is the source tag for Datadog exporter
+ ExporterSourceTag = "datadogexporter"
+ // ConnectorSourceTag is the source tag for Datadog connector
ConnectorSourceTag = "datadogconnector"
)
@@ -35,6 +37,7 @@ func InitializeMetricClient(mp metric.MeterProvider, source string) statsd.Clien
}
}
+// Gauge implements the Statsd Gauge interface
func (m *metricsClient) Gauge(name string, value float64, tags []string, _ float64) error {
// The last parameter is rate, but we're omitting it because rate does not have effect for gauge points: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/dedd44436ae064f5a0b43769d24adf897533957b/receiver/statsdreceiver/internal/protocol/metric_translator.go#L153-L156
m.mutex.Lock()
@@ -59,6 +62,7 @@ func (m *metricsClient) Gauge(name string, value float64, tags []string, _ float
return nil
}
+// Count implements the Statsd Count interface
func (m *metricsClient) Count(name string, value int64, tags []string, _ float64) error {
counter, err := m.meter.Int64Counter(name)
if err != nil {
@@ -85,6 +89,7 @@ func (m *metricsClient) attributeFromTags(tags []string) attribute.Set {
return attribute.NewSet(attr...)
}
+// Histogram implements the Statsd Histogram interface
func (m *metricsClient) Histogram(name string, value float64, tags []string, _ float64) error {
hist, err := m.meter.Float64Histogram(name)
if err != nil {
@@ -95,66 +100,82 @@ func (m *metricsClient) Histogram(name string, value float64, tags []string, _ f
return nil
}
+// Distribution implements the Statsd Distribution interface
func (m *metricsClient) Distribution(name string, value float64, tags []string, rate float64) error {
return m.Histogram(name, value, tags, rate)
}
+// Timing implements the Statsd Timing interface
func (m *metricsClient) Timing(name string, value time.Duration, tags []string, rate float64) error {
return m.TimeInMilliseconds(name, value.Seconds()*1000, tags, rate)
}
+// TimeInMilliseconds implements the Statsd TimeInMilliseconds interface
func (m *metricsClient) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
return m.Histogram(name, value, tags, rate)
}
+// Decr implements the Statsd Decr interface
func (m *metricsClient) Decr(name string, tags []string, rate float64) error {
return m.Count(name, -1, tags, rate)
}
+// Incr implements the Statsd Incr interface
func (m *metricsClient) Incr(name string, tags []string, rate float64) error {
return m.Count(name, 1, tags, rate)
}
+// Flush implements the Statsd Flush interface
func (m *metricsClient) Flush() error {
return nil
}
+// Set implements the Statsd Set interface
func (m *metricsClient) Set(string, string, []string, float64) error {
return nil
}
+// Event implements the Statsd Event interface
func (m *metricsClient) Event(*statsd.Event) error {
return nil
}
+// SimpleEvent implements the Statsd SimpleEvent interface
func (m *metricsClient) SimpleEvent(string, string) error {
return nil
}
+// ServiceCheck implements the Statsd ServiceCheck interface
func (m *metricsClient) ServiceCheck(*statsd.ServiceCheck) error {
return nil
}
+// SimpleServiceCheck implements the Statsd SimpleServiceCheck interface
func (m *metricsClient) SimpleServiceCheck(string, statsd.ServiceCheckStatus) error {
return nil
}
+// Close implements the Statsd Close interface
func (m *metricsClient) Close() error {
return nil
}
+// IsClosed implements the Statsd IsClosed interface
func (m *metricsClient) IsClosed() bool {
return false
}
+// GetTelemetry implements the Statsd GetTelemetry interface
func (m *metricsClient) GetTelemetry() statsd.Telemetry {
return statsd.Telemetry{}
}
+// GaugeWithTimestamp implements the Statsd GaugeWithTimestamp interface
func (m *metricsClient) GaugeWithTimestamp(string, float64, []string, float64, time.Time) error {
return nil
}
+// CountWithTimestamp implements the Statsd CountWithTimestamp interface
func (m *metricsClient) CountWithTimestamp(string, int64, []string, float64, time.Time) error {
return nil
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/collector/check/defaults/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/collector/check/defaults/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/collector/check/defaults/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/collector/check/defaults/defaults.go b/vendor/github.com/DataDog/datadog-agent/pkg/collector/check/defaults/defaults.go
new file mode 100644
index 0000000000..d7b961f346
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/collector/check/defaults/defaults.go
@@ -0,0 +1,16 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package defaults
+
+import (
+ "time"
+)
+
+const (
+ // DefaultCheckInterval is the interval the scheduler should apply
+ // when no value was provided in the check configuration.
+ DefaultCheckInterval time.Duration = 15 * time.Second
+)
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/collector/check/defaults/doc.go b/vendor/github.com/DataDog/datadog-agent/pkg/collector/check/defaults/doc.go
new file mode 100644
index 0000000000..8f606a80f1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/collector/check/defaults/doc.go
@@ -0,0 +1,7 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package defaults provides common defaults used in agent checks
+package defaults
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/env/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/env/doc.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/doc.go
new file mode 100644
index 0000000000..98e69cfda7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/doc.go
@@ -0,0 +1,7 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package env contains environment related configurations
+package env
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment.go
new file mode 100644
index 0000000000..bdd2f7637d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment.go
@@ -0,0 +1,96 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package env
+
+import (
+ "os"
+
+ "github.com/DataDog/datadog-agent/pkg/util/filesystem"
+)
+
+// GetEnvDefault retrieves a value from the environment named by the key, or returns def if it is not set.
+func GetEnvDefault(key, def string) string {
+ value, found := os.LookupEnv(key)
+ if !found {
+ return def
+ }
+ return value
+}
+
+// IsContainerized returns whether the Agent is running in a Docker container
+// DOCKER_DD_AGENT is set in our official Dockerfile
+func IsContainerized() bool {
+ return os.Getenv("DOCKER_DD_AGENT") != ""
+}
+
+// IsDockerRuntime returns true if the /.dockerenv file is present,
+// which is typically only created by Docker
+func IsDockerRuntime() bool {
+ _, err := os.Stat("/.dockerenv")
+ return err == nil
+}
+
+// IsKubernetes returns whether the Agent is running on a kubernetes cluster
+func IsKubernetes() bool {
+ // Injected by Kubernetes itself
+ if os.Getenv("KUBERNETES_SERVICE_PORT") != "" {
+ return true
+ }
+ // support of Datadog environment variable for Kubernetes
+ if os.Getenv("KUBERNETES") != "" {
+ return true
+ }
+ return false
+}
+
+// IsECS returns whether the Agent is running on ECS
+func IsECS() bool {
+ if os.Getenv("AWS_EXECUTION_ENV") == "AWS_ECS_EC2" {
+ return true
+ }
+
+ if IsECSFargate() {
+ return false
+ }
+
+ if os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") != "" ||
+ os.Getenv("ECS_CONTAINER_METADATA_URI") != "" ||
+ os.Getenv("ECS_CONTAINER_METADATA_URI_V4") != "" {
+ return true
+ }
+
+ if _, err := os.Stat("/etc/ecs/ecs.config"); err == nil {
+ return true
+ }
+
+ return false
+}
+
+// IsECSFargate returns whether the Agent is running in ECS Fargate
+func IsECSFargate() bool {
+ return os.Getenv("ECS_FARGATE") != "" || os.Getenv("AWS_EXECUTION_ENV") == "AWS_ECS_FARGATE"
+}
+
+// IsHostProcAvailable returns whether host proc is available or not
+func IsHostProcAvailable() bool {
+ if IsContainerized() {
+ return filesystem.FileExists("/host/proc")
+ }
+ return true
+}
+
+// IsHostSysAvailable returns whether host sys is available or not
+func IsHostSysAvailable() bool {
+ if IsContainerized() {
+ return filesystem.FileExists("/host/sys")
+ }
+ return true
+}
+
+// IsServerless returns whether the Agent is running in a Lambda function
+func IsServerless() bool {
+ return os.Getenv("AWS_LAMBDA_FUNCTION_NAME") != ""
+}
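As an illustration of how these helpers are typically consumed, here is a minimal sketch; the surrounding main function is hypothetical and not part of this change, and the import path simply follows the vendored module path used above:

    package main

    import (
        "fmt"

        "github.com/DataDog/datadog-agent/pkg/config/env"
    )

    func main() {
        // Each helper only inspects environment variables or well-known files,
        // so the checks are cheap enough to run at startup.
        switch {
        case env.IsECSFargate():
            fmt.Println("running on ECS Fargate")
        case env.IsKubernetes():
            fmt.Println("running on Kubernetes")
        case env.IsContainerized():
            fmt.Println("running in the official Agent container image")
        default:
            fmt.Println("running on a plain host")
        }
    }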
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_container_features.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_container_features.go
new file mode 100644
index 0000000000..cec6b8dd33
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_container_features.go
@@ -0,0 +1,32 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package env
+
+// Remember to also register feature in init()
+const (
+ // Docker socket present
+ Docker Feature = "docker"
+ // Containerd socket present
+ Containerd Feature = "containerd"
+ // Cri is any cri socket present
+ Cri Feature = "cri"
+ // Kubernetes environment
+ Kubernetes Feature = "kubernetes"
+ // ECSEC2 environment
+ ECSEC2 Feature = "ecsec2"
+ // ECSFargate environment
+ ECSFargate Feature = "ecsfargate"
+ // EKSFargate environment
+ EKSFargate Feature = "eksfargate"
+ // KubeOrchestratorExplorer can be enabled
+ KubeOrchestratorExplorer Feature = "kube_orchestratorexplorer"
+ // ECSOrchestratorExplorer can be enabled
+ ECSOrchestratorExplorer Feature = "ecs_orchestratorexplorer"
+ // CloudFoundry socket present
+ CloudFoundry Feature = "cloudfoundry"
+ // Podman containers storage path accessible
+ Podman Feature = "podman"
+)
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_containers.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_containers.go
new file mode 100644
index 0000000000..7e1c267123
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_containers.go
@@ -0,0 +1,277 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux || windows
+
+package env
+
+import (
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/system/socket"
+)
+
+const (
+ defaultLinuxDockerSocket = "/var/run/docker.sock"
+ defaultWindowsDockerSocketPath = "//./pipe/docker_engine"
+ defaultLinuxContainerdSocket = "/var/run/containerd/containerd.sock"
+ defaultWindowsContainerdSocketPath = "//./pipe/containerd-containerd"
+ defaultLinuxCrioSocket = "/var/run/crio/crio.sock"
+ defaultHostMountPrefix = "/host"
+ defaultPodmanContainersStoragePath = "/var/lib/containers/storage"
+ unixSocketPrefix = "unix://"
+ winNamedPipePrefix = "npipe://"
+
+ socketTimeout = 500 * time.Millisecond
+)
+
+func init() {
+ registerFeature(Docker)
+ registerFeature(Containerd)
+ registerFeature(Cri)
+ registerFeature(Kubernetes)
+ registerFeature(ECSEC2)
+ registerFeature(ECSFargate)
+ registerFeature(EKSFargate)
+ registerFeature(KubeOrchestratorExplorer)
+ registerFeature(ECSOrchestratorExplorer)
+ registerFeature(CloudFoundry)
+ registerFeature(Podman)
+}
+
+// IsAnyContainerFeaturePresent checks if any of the known container features is present
+func IsAnyContainerFeaturePresent() bool {
+ return IsFeaturePresent(Docker) ||
+ IsFeaturePresent(Containerd) ||
+ IsFeaturePresent(Cri) ||
+ IsFeaturePresent(Kubernetes) ||
+ IsFeaturePresent(ECSEC2) ||
+ IsFeaturePresent(ECSFargate) ||
+ IsFeaturePresent(EKSFargate) ||
+ IsFeaturePresent(CloudFoundry) ||
+ IsFeaturePresent(Podman)
+}
+
+func detectContainerFeatures(features FeatureMap, cfg model.Reader) {
+ detectKubernetes(features, cfg)
+ detectDocker(features)
+ detectContainerd(features, cfg)
+ detectAWSEnvironments(features, cfg)
+ detectCloudFoundry(features, cfg)
+ detectPodman(features, cfg)
+}
+
+func detectKubernetes(features FeatureMap, cfg model.Reader) {
+ if IsKubernetes() {
+ features[Kubernetes] = struct{}{}
+ if cfg.GetBool("orchestrator_explorer.enabled") {
+ features[KubeOrchestratorExplorer] = struct{}{}
+ }
+ }
+}
+
+func detectDocker(features FeatureMap) {
+ if _, dockerHostSet := os.LookupEnv("DOCKER_HOST"); dockerHostSet {
+ features[Docker] = struct{}{}
+ } else {
+ for _, defaultDockerSocketPath := range getDefaultDockerPaths() {
+ exists, reachable := socket.IsAvailable(defaultDockerSocketPath, socketTimeout)
+ if exists && !reachable {
+ log.Infof("Agent found Docker socket at: %s but socket not reachable (permissions?)", defaultDockerSocketPath)
+ continue
+ }
+
+ if exists && reachable {
+ features[Docker] = struct{}{}
+
+ // Even though it does not modify configuration, using the OverrideFunc mechanism for uniformity
+ model.AddOverrideFunc(func(model.Config) {
+ os.Setenv("DOCKER_HOST", getDefaultDockerSocketType()+defaultDockerSocketPath)
+ })
+ break
+ }
+ }
+ }
+}
+
+func detectContainerd(features FeatureMap, cfg model.Reader) {
+ // CRI Socket - Do not automatically default socket path if the Agent runs in Docker
+ // as we'll very likely discover the containerd instance wrapped by Docker.
+ criSocket := cfg.GetString("cri_socket_path")
+ if criSocket == "" && !IsDockerRuntime() {
+ for _, defaultCriPath := range getDefaultCriPaths() {
+ exists, reachable := socket.IsAvailable(defaultCriPath, socketTimeout)
+ if exists && !reachable {
+ log.Infof(
+ "Agent found cri socket at: %s but socket not reachable (permissions?)",
+ defaultCriPath,
+ )
+ continue
+ }
+
+ if exists && reachable {
+ criSocket = defaultCriPath
+ model.AddOverride("cri_socket_path", defaultCriPath)
+ // Currently we do not support multiple CRI paths
+ break
+ }
+ }
+ }
+
+ if criSocket != "" {
+ if isCriSupported() {
+ features[Cri] = struct{}{}
+ }
+
+ if strings.Contains(criSocket, "containerd") {
+ features[Containerd] = struct{}{}
+ }
+ }
+
+ // Merge containerd_namespace with containerd_namespaces
+ namespaces := merge(
+ cfg.GetStringSlice("containerd_namespaces"),
+ cfg.GetStringSlice("containerd_namespace"),
+ )
+
+ // Workaround: convert to []interface{}.
+ // The MergeConfigOverride func in "github.com/DataDog/viper" (tested in
+ // v1.10.0) raises an error if we send a []string{} in AddOverride():
+ // "svType != tvType; key=containerd_namespace, st=[]interface {}, tt=[]string, sv=[], tv=[]"
+ // The reason is that when reading from a config file, all the arrays are
+ // considered as []interface{} by Viper, and the merge fails when the types
+ // are different.
+ convertedNamespaces := make([]interface{}, len(namespaces))
+ for i, namespace := range namespaces {
+ convertedNamespaces[i] = namespace
+ }
+
+ model.AddOverride("containerd_namespace", convertedNamespaces)
+ model.AddOverride("containerd_namespaces", convertedNamespaces)
+}
+
+func isCriSupported() bool {
+ // Containerd support was historically meant for K8S
+ // However, containerd is now used standalone elsewhere.
+ return IsKubernetes()
+}
+
+func detectAWSEnvironments(features FeatureMap, cfg model.Reader) {
+ if IsECSFargate() {
+ features[ECSFargate] = struct{}{}
+ if cfg.GetBool("orchestrator_explorer.enabled") &&
+ cfg.GetBool("orchestrator_explorer.ecs_collection.enabled") {
+ features[ECSOrchestratorExplorer] = struct{}{}
+ }
+ return
+ }
+
+ if cfg.GetBool("eks_fargate") {
+ features[EKSFargate] = struct{}{}
+ features[Kubernetes] = struct{}{}
+ return
+ }
+
+ if IsECS() {
+ features[ECSEC2] = struct{}{}
+ if cfg.GetBool("orchestrator_explorer.enabled") &&
+ cfg.GetBool("orchestrator_explorer.ecs_collection.enabled") {
+ features[ECSOrchestratorExplorer] = struct{}{}
+ }
+ }
+}
+
+func detectCloudFoundry(features FeatureMap, cfg model.Reader) {
+ if cfg.GetBool("cloud_foundry") {
+ features[CloudFoundry] = struct{}{}
+ }
+}
+
+func detectPodman(features FeatureMap, cfg model.Reader) {
+ podmanDbPath := cfg.GetString("podman_db_path")
+ if podmanDbPath != "" {
+ features[Podman] = struct{}{}
+ return
+ }
+ for _, defaultPath := range getDefaultPodmanPaths() {
+ if _, err := os.Stat(defaultPath); err == nil {
+ features[Podman] = struct{}{}
+ return
+ }
+ }
+}
+
+func getHostMountPrefixes() []string {
+ if IsContainerized() {
+ return []string{"", defaultHostMountPrefix}
+ }
+ return []string{""}
+}
+
+func getDefaultDockerSocketType() string {
+ if runtime.GOOS == "windows" {
+ return winNamedPipePrefix
+ }
+
+ return unixSocketPrefix
+}
+
+func getDefaultDockerPaths() []string {
+ if runtime.GOOS == "windows" {
+ return []string{defaultWindowsDockerSocketPath}
+ }
+
+ paths := []string{}
+ for _, prefix := range getHostMountPrefixes() {
+ paths = append(paths, path.Join(prefix, defaultLinuxDockerSocket))
+ }
+ return paths
+}
+
+func getDefaultCriPaths() []string {
+ if runtime.GOOS == "windows" {
+ return []string{defaultWindowsContainerdSocketPath}
+ }
+
+ paths := []string{}
+ for _, prefix := range getHostMountPrefixes() {
+ paths = append(
+ paths,
+ path.Join(prefix, defaultLinuxContainerdSocket),
+ path.Join(prefix, defaultLinuxCrioSocket),
+ )
+ }
+ return paths
+}
+
+func getDefaultPodmanPaths() []string {
+ paths := []string{}
+ for _, prefix := range getHostMountPrefixes() {
+ paths = append(paths, path.Join(prefix, defaultPodmanContainersStoragePath))
+ }
+ return paths
+}
+
+// merge merges and dedupes 2 slices without changing order
+func merge(s1, s2 []string) []string {
+ dedupe := map[string]struct{}{}
+ merged := []string{}
+
+ for _, elem := range append(s1, s2...) {
+ if _, seen := dedupe[elem]; !seen {
+ merged = append(merged, elem)
+ }
+
+ dedupe[elem] = struct{}{}
+ }
+
+ return merged
+}
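For reference, the order-preserving dedupe implemented by the unexported merge helper above behaves like this standalone sketch (mergeDedupe is a hypothetical copy, shown only to make the semantics concrete):

    package main

    import "fmt"

    // mergeDedupe appends s2 to s1 and keeps only the first occurrence of each
    // element, preserving order, mirroring merge above.
    func mergeDedupe(s1, s2 []string) []string {
        seen := map[string]struct{}{}
        merged := []string{}
        for _, elem := range append(s1, s2...) {
            if _, ok := seen[elem]; !ok {
                merged = append(merged, elem)
            }
            seen[elem] = struct{}{}
        }
        return merged
    }

    func main() {
        // e.g. merging containerd_namespaces with the legacy containerd_namespace setting
        fmt.Println(mergeDedupe([]string{"k8s.io"}, []string{"k8s.io", "moby"})) // [k8s.io moby]
    }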
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_detection.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_detection.go
new file mode 100644
index 0000000000..b50c92b7b9
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_detection.go
@@ -0,0 +1,158 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package env
+
+import (
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+const (
+ autoconfEnvironmentVariable = "AUTOCONFIG_FROM_ENVIRONMENT"
+ autoconfEnvironmentVariableWithTypo = "AUTCONFIG_FROM_ENVIRONMENT"
+)
+
+// Feature represents a feature of current environment
+type Feature string
+
+// FeatureMap represents all detected features
+type FeatureMap map[Feature]struct{}
+
+func (fm FeatureMap) String() string {
+ features := make([]string, 0, len(fm))
+ for f := range fm {
+ features = append(features, string(f))
+ }
+
+ return strings.Join(features, ",")
+}
+
+var (
+ knownFeatures = make(FeatureMap)
+ detectedFeatures FeatureMap
+ featureLock sync.RWMutex
+ detectionAlwaysDisabledInTests bool
+)
+
+// GetDetectedFeatures returns all detected features (detection only performed once)
+func GetDetectedFeatures() FeatureMap {
+ featureLock.RLock()
+ defer featureLock.RUnlock()
+
+ if detectedFeatures == nil {
+ // If this function is called while feature detection has not run
+ // it means Configuration has not been loaded, which is an unexpected flow in our code
+ // It's not useful to do lazy detection as it would also mean Configuration has not been loaded
+ panic("Trying to access features before detection has run")
+ }
+
+ return detectedFeatures
+}
+
+// IsFeaturePresent returns whether a particular feature is activated
+func IsFeaturePresent(feature Feature) bool {
+ featureLock.RLock()
+ defer featureLock.RUnlock()
+
+ if detectedFeatures == nil {
+ // If this function is called while feature detection has not run
+ // it means Configuration has not been loaded, which is an unexpected flow in our code
+ // It's not useful to do lazy detection as it would also mean Configuration has not been loaded
+ panic("Trying to access features before detection has run")
+ }
+
+ _, found := detectedFeatures[feature]
+ return found
+}
+
+// IsAutoconfigEnabled returns whether autoconfig from the environment is activated or not
+func IsAutoconfigEnabled(cfg model.Reader) bool {
+ // Usage of pure environment variables should be deprecated
+ for _, envVar := range []string{autoconfEnvironmentVariable, autoconfEnvironmentVariableWithTypo} {
+ if autoconfStr, found := os.LookupEnv(envVar); found {
+ activateAutoconfFromEnv, err := strconv.ParseBool(autoconfStr)
+ if err != nil {
+ log.Errorf("Unable to parse Autoconf value: '%s', err: %v - autoconfig from environment will be deactivated", autoconfStr, err)
+ return false
+ }
+
+ log.Warnf("Usage of '%s' variable is deprecated - please use DD_AUTOCONFIG_FROM_ENVIRONMENT or 'autoconfig_from_environment' in config file", envVar)
+ return activateAutoconfFromEnv
+ }
+ }
+
+ return cfg.GetBool("autoconfig_from_environment")
+}
+
+// DetectFeatures runs the feature detection.
+// We guarantee that Datadog configuration is entirely loaded (env + YAML)
+// before this function is called
+func DetectFeatures(cfg model.Reader) {
+ featureLock.Lock()
+ defer featureLock.Unlock()
+
+ // Detection should not run in unit tests to avoid overriding features based on runner environment
+ if detectionAlwaysDisabledInTests {
+ return
+ }
+
+ newFeatures := make(FeatureMap)
+ if IsAutoconfigEnabled(cfg) {
+ detectContainerFeatures(newFeatures, cfg)
+ excludedFeatures := cfg.GetStringSlice("autoconfig_exclude_features")
+ excludeFeatures(newFeatures, excludedFeatures)
+
+ includedFeatures := cfg.GetStringSlice("autoconfig_include_features")
+ for _, f := range includedFeatures {
+ f = strings.ToLower(f)
+ if _, found := knownFeatures[Feature(f)]; found {
+ newFeatures[Feature(f)] = struct{}{}
+ } else {
+ log.Warnf("Unknown feature in autoconfig_include_features: %s - discarding", f)
+ }
+ }
+
+ log.Infof("%d Features detected from environment: %v", len(newFeatures), newFeatures)
+ } else {
+ log.Warnf("Deactivating Autoconfig will disable most components. It's recommended to use autoconfig_exclude_features and autoconfig_include_features to activate/deactivate features selectively")
+ }
+ detectedFeatures = newFeatures
+}
+
+func excludeFeatures(detectedFeatures FeatureMap, excludedFeatures []string) {
+ rFilters := make([]*regexp.Regexp, 0, len(excludedFeatures))
+
+ for _, filter := range excludedFeatures {
+ filter = strings.ToLower(strings.TrimPrefix(filter, "name:"))
+ r, err := regexp.Compile(filter)
+ if err != nil {
+ log.Warnf("Unbale to parse exclude feature filter: '%s'", filter)
+ continue
+ }
+
+ rFilters = append(rFilters, r)
+ }
+
+ for feature := range detectedFeatures {
+ for _, r := range rFilters {
+ if r.MatchString(string(feature)) {
+ delete(detectedFeatures, feature)
+ break
+ }
+ }
+ }
+}
+
+//nolint:deadcode,unused
+func registerFeature(f Feature) {
+ knownFeatures[f] = struct{}{}
+}
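A rough sketch of the intended call order follows; the example package and setupFeatures function are hypothetical, and cfg stands for a configuration that has already been fully loaded (its construction is outside this diff):

    package example

    import (
        "github.com/DataDog/datadog-agent/pkg/config/env"
        "github.com/DataDog/datadog-agent/pkg/config/model"
    )

    // setupFeatures runs feature detection once the configuration is loaded and
    // only then queries the result; IsFeaturePresent and GetDetectedFeatures
    // panic if DetectFeatures has not run yet.
    func setupFeatures(cfg model.Reader) bool {
        env.DetectFeatures(cfg)
        return env.IsFeaturePresent(env.Docker)
    }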
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_nocontainers.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_nocontainers.go
new file mode 100644
index 0000000000..d873f9a1d0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_nocontainers.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !(linux || windows)
+
+package env
+
+import "github.com/DataDog/datadog-agent/pkg/config/model"
+
+// IsAnyContainerFeaturePresent checks if any of the known container features is present
+func IsAnyContainerFeaturePresent() bool {
+ return false
+}
+
+func detectContainerFeatures(_ FeatureMap, _ model.Reader) {
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_testing.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_testing.go
new file mode 100644
index 0000000000..e84e00ec20
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/env/environment_testing.go
@@ -0,0 +1,41 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package env
+
+import "testing"
+
+// Set a default feature list matching what is widely used in unit tests.
+func init() {
+ detectionAlwaysDisabledInTests = true
+ detectedFeatures = FeatureMap{}
+}
+
+// SetFeatures sets the given features and automatically removes them through t.Cleanup
+func SetFeatures(t testing.TB, features ...Feature) {
+ SetFeaturesNoCleanup(features...)
+ t.Cleanup(ClearFeatures)
+}
+
+// SetFeaturesNoCleanup DO NOT USE (except in specific integration tests which don't have a testing.T available)
+func SetFeaturesNoCleanup(features ...Feature) {
+ featureLock.Lock()
+ defer featureLock.Unlock()
+
+ detectedFeatures = make(FeatureMap)
+ for _, feature := range features {
+ detectedFeatures[feature] = struct{}{}
+ }
+}
+
+// ClearFeatures removes all set features
+func ClearFeatures() {
+ featureLock.Lock()
+ defer featureLock.Unlock()
+
+ detectedFeatures = make(FeatureMap)
+}
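Since this file only builds with the test tag, a unit test elsewhere in the Agent could pin the feature set as sketched below; TestDockerPath and its package are hypothetical, shown only to illustrate the t.Cleanup behaviour:

    package example

    import (
        "testing"

        "github.com/DataDog/datadog-agent/pkg/config/env"
    )

    func TestDockerPath(t *testing.T) {
        // Pretend only the Docker feature was detected; t.Cleanup resets the
        // feature map to empty when the test finishes.
        env.SetFeatures(t, env.Docker)

        if !env.IsFeaturePresent(env.Docker) {
            t.Fatal("expected Docker feature to be present")
        }
    }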
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/model/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/model/config_overrides.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/config_overrides.go
new file mode 100644
index 0000000000..93670a2521
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/config_overrides.go
@@ -0,0 +1,52 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package model
+
+var (
+ overrideVars = make(map[string]interface{})
+ overrideFuncs = make([]func(Config), 0)
+)
+
+func init() {
+ AddOverrideFunc(applyOverrideVars)
+}
+
+// AddOverrideFunc allows adding custom logic to override configuration.
+// This method must be called before Load() to be effective.
+func AddOverrideFunc(f func(Config)) {
+ overrideFuncs = append(overrideFuncs, f)
+}
+
+// AddOverride provides an externally accessible method for
+// overriding config variables.
+// This method must be called before Load() to be effective.
+func AddOverride(name string, value interface{}) {
+ overrideVars[name] = value
+}
+
+// AddOverrides provides an externally accessible method for
+// overriding config variables.
+// This method must be called before Load() to be effective.
+func AddOverrides(vars map[string]interface{}) {
+ for k, v := range vars {
+ overrideVars[k] = v
+ }
+}
+
+// ApplyOverrideFuncs calls overrideFuncs
+func ApplyOverrideFuncs(config Config) {
+ for _, f := range overrideFuncs {
+ f(config)
+ }
+}
+
+func applyOverrideVars(config Config) {
+ for k, v := range overrideVars {
+ if config.IsKnown(k) {
+ config.Set(k, v, SourceEnvVar)
+ }
+ }
+}
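A sketch of how these hooks are meant to be used; applyDetectedOverrides and its package are hypothetical, and per the doc comments the registrations must happen before the configuration is loaded:

    package example

    import "github.com/DataDog/datadog-agent/pkg/config/model"

    // applyDetectedOverrides registers overrides up front; ApplyOverrideFuncs later
    // applies them, including the built-in applyOverrideVars, which only sets keys
    // the config already knows about.
    func applyDetectedOverrides(cfg model.Config) {
        model.AddOverride("cri_socket_path", "/var/run/containerd/containerd.sock")
        model.AddOverrideFunc(func(c model.Config) {
            // custom override logic runs here when ApplyOverrideFuncs is called
        })

        // ... once cfg has been loaded elsewhere:
        model.ApplyOverrideFuncs(cfg)
    }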
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/model/doc.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/doc.go
new file mode 100644
index 0000000000..19d2ad916b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/doc.go
@@ -0,0 +1,7 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package model contains types for Agent config
+package model
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/model/types.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/types.go
new file mode 100644
index 0000000000..79ed11a808
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/types.go
@@ -0,0 +1,158 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package model
+
+import (
+ "io"
+ "strings"
+ "time"
+
+ "github.com/DataDog/viper"
+ "github.com/spf13/afero"
+ "github.com/spf13/pflag"
+)
+
+// Proxy represents the configuration for proxies in the agent
+type Proxy struct {
+ HTTP string `mapstructure:"http"`
+ HTTPS string `mapstructure:"https"`
+ NoProxy []string `mapstructure:"no_proxy"`
+}
+
+// NotificationReceiver represents the callback type to receive notifications each time the `Set` method is called. The
+// configuration will call each NotificationReceiver registered through the 'OnUpdate' method, therefore
+// 'NotificationReceiver' should not be blocking.
+type NotificationReceiver func(setting string, oldValue, newValue any)
+
+// Reader is a subset of Config that only allows reading of configuration
+type Reader interface {
+ Get(key string) interface{}
+ GetString(key string) string
+ GetBool(key string) bool
+ GetInt(key string) int
+ GetInt32(key string) int32
+ GetInt64(key string) int64
+ GetFloat64(key string) float64
+ GetTime(key string) time.Time
+ GetDuration(key string) time.Duration
+ GetStringSlice(key string) []string
+ GetFloat64SliceE(key string) ([]float64, error)
+ GetStringMap(key string) map[string]interface{}
+ GetStringMapString(key string) map[string]string
+ GetStringMapStringSlice(key string) map[string][]string
+ GetSizeInBytes(key string) uint
+ GetProxies() *Proxy
+
+ GetSource(key string) Source
+ GetAllSources(key string) []ValueWithSource
+
+ ConfigFileUsed() string
+
+ AllSettings() map[string]interface{}
+ AllSettingsWithoutDefault() map[string]interface{}
+ AllSourceSettingsWithoutDefault(source Source) map[string]interface{}
+ // AllKeysLowercased returns all config keys in the config, no matter how they are set.
+ // Note that it returns the keys lowercased.
+ AllKeysLowercased() []string
+
+ IsSet(key string) bool
+ IsSetForSource(key string, source Source) bool
+
+ // UnmarshalKey Unmarshal a configuration key into a struct
+ UnmarshalKey(key string, rawVal interface{}, opts ...viper.DecoderConfigOption) error
+
+ // IsKnown returns whether this key is known
+ IsKnown(key string) bool
+
+ // GetKnownKeysLowercased returns all the keys that meet at least one of these criteria:
+ // 1) have a default, 2) have an environment variable bound, 3) are an alias or 4) have been SetKnown()
+ // Note that it returns the keys lowercased.
+ GetKnownKeysLowercased() map[string]interface{}
+
+ // GetEnvVars returns a list of the env vars that the config supports.
+ // These have had the EnvPrefix applied, as well as the EnvKeyReplacer.
+ GetEnvVars() []string
+
+ // IsSectionSet checks if a given section is set by checking if any of
+ // its subkeys is set.
+ IsSectionSet(section string) bool
+
+ // Warnings returns pointer to a list of warnings (completes config.Component interface)
+ Warnings() *Warnings
+
+ // Object returns Reader to config (completes config.Component interface)
+ Object() Reader
+
+ // OnUpdate adds a callback to the list of receivers to be called each time a value is changed in the configuration
+ // by a call to the 'Set' method. The configuration will sequentially call each receiver.
+ OnUpdate(callback NotificationReceiver)
+}
+
+// Writer is a subset of Config that only allows writing the configuration
+type Writer interface {
+ Set(key string, value interface{}, source Source)
+ SetWithoutSource(key string, value interface{})
+ UnsetForSource(key string, source Source)
+ CopyConfig(cfg Config)
+}
+
+// ReaderWriter is a subset of Config that allows reading and writing the configuration
+type ReaderWriter interface {
+ Reader
+ Writer
+}
+
+// Loader is a subset of Config that allows loading the configuration
+type Loader interface {
+ // API implemented by viper.Viper
+
+ SetDefault(key string, value interface{})
+ SetFs(fs afero.Fs)
+
+ SetEnvPrefix(in string)
+ BindEnv(input ...string)
+ SetEnvKeyReplacer(r *strings.Replacer)
+ SetEnvKeyTransformer(key string, fn func(string) interface{})
+
+ UnmarshalKey(key string, rawVal interface{}, opts ...viper.DecoderConfigOption) error
+ Unmarshal(rawVal interface{}) error
+ UnmarshalExact(rawVal interface{}) error
+
+ ReadInConfig() error
+ ReadConfig(in io.Reader) error
+ MergeConfig(in io.Reader) error
+ MergeConfigMap(cfg map[string]any) error
+
+ AddConfigPath(in string)
+ SetConfigName(in string)
+ SetConfigFile(in string)
+ SetConfigType(in string)
+
+ BindPFlag(key string, flag *pflag.Flag) error
+
+ // SetKnown adds a key to the set of known valid config keys
+ SetKnown(key string)
+
+ // APIs not implemented by viper.Viper that have proven useful for our config usage
+
+ // BindEnvAndSetDefault sets the default value for a config parameter and adds an env binding
+ // in one call, used for most config options.
+ //
+ // If env is provided, it will override the name of the environment variable used for this
+ // config key
+ BindEnvAndSetDefault(key string, val interface{}, env ...string)
+}
+
+// Config represents an object that can load and store configuration parameters
+// coming from different kind of sources:
+// - defaults
+// - files
+// - environment variables
+// - flags
+type Config interface {
+ ReaderWriter
+ Loader
+}
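To make the Loader surface concrete, here is a short hypothetical registration sketch (the key names and the DD prefix are illustrative; the exact environment variable name also depends on SetEnvKeyReplacer):

    package example

    import "github.com/DataDog/datadog-agent/pkg/config/model"

    // registerDefaults shows the typical pattern: BindEnvAndSetDefault registers a
    // default value and an env binding in one call, while SetKnown only marks a key
    // as valid without giving it a default.
    func registerDefaults(cfg model.Config) {
        cfg.SetEnvPrefix("DD")
        cfg.BindEnvAndSetDefault("autoconfig_from_environment", true)
        cfg.SetKnown("cri_socket_path")
    }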
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/model/viper.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/viper.go
new file mode 100644
index 0000000000..d1329fba55
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/viper.go
@@ -0,0 +1,768 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package model
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/DataDog/viper"
+ "github.com/mohae/deepcopy"
+ "github.com/spf13/afero"
+ "github.com/spf13/pflag"
+ "golang.org/x/exp/slices"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// Source identifies, as a string, what set a given setting
+type Source string
+
+// Declare every known Source
+const (
+ SourceDefault Source = "default"
+ SourceUnknown Source = "unknown"
+ SourceFile Source = "file"
+ SourceEnvVar Source = "environment-variable"
+ SourceAgentRuntime Source = "agent-runtime"
+ SourceLocalConfigProcess Source = "local-config-process"
+ SourceRC Source = "remote-config"
+ SourceCLI Source = "cli"
+)
+
+// sources lists the known sources, following the order of hierarchy between them
+var sources = []Source{SourceDefault, SourceUnknown, SourceFile, SourceEnvVar, SourceAgentRuntime, SourceLocalConfigProcess, SourceRC, SourceCLI}
+
+// ValueWithSource is a tuple for a source and a value, not necessarily the applied value in the main config
+type ValueWithSource struct {
+ Source Source
+ Value interface{}
+}
+
+// String casts Source into a string
+func (s Source) String() string {
+ // Safeguard: if we don't know the Source, we assume SourceUnknown
+ if s == "" {
+ return string(SourceUnknown)
+ }
+ return string(s)
+}
+
+// safeConfig implements Config:
+// - wraps viper with a safety lock
+// - implements the additional DDHelpers
+type safeConfig struct {
+ *viper.Viper
+ configSources map[Source]*viper.Viper
+ sync.RWMutex
+ envPrefix string
+ envKeyReplacer *strings.Replacer
+
+ notificationReceivers []NotificationReceiver
+
+ // Proxy settings
+ proxiesOnce sync.Once
+ proxies *Proxy
+
+ // configEnvVars is the set of env vars that are consulted for
+ // configuration values.
+ configEnvVars map[string]struct{}
+
+ // keys that have been used but are unknown
+ // used to warn (a single time) on use
+ unknownKeys map[string]struct{}
+}
+
+// OnUpdate adds a callback to the list of receivers to be called each time a value is changed in the configuration
+// by a call to the 'Set' method.
+// Callbacks are only called if the value is effectively changed.
+func (c *safeConfig) OnUpdate(callback NotificationReceiver) {
+ c.Lock()
+ defer c.Unlock()
+ c.notificationReceivers = append(c.notificationReceivers, callback)
+}
+
+// Set wraps Viper for concurrent access
+func (c *safeConfig) Set(key string, newValue interface{}, source Source) {
+ if source == SourceDefault {
+ c.SetDefault(key, newValue)
+ return
+ }
+
+ // modify the config then release the lock to avoid deadlocks while notifying
+ var receivers []NotificationReceiver
+ c.Lock()
+ previousValue := c.Viper.Get(key)
+ c.configSources[source].Set(key, newValue)
+ c.mergeViperInstances(key)
+ if !reflect.DeepEqual(previousValue, newValue) {
+ // if the value has not changed, do not duplicate the slice so that no callback is called
+ receivers = slices.Clone(c.notificationReceivers)
+ }
+ c.Unlock()
+
+ // notify all receivers about the updated setting
+ for _, receiver := range receivers {
+ receiver(key, previousValue, newValue)
+ }
+}
+
+// SetWithoutSource sets the given value using source Unknown
+func (c *safeConfig) SetWithoutSource(key string, value interface{}) {
+ c.Set(key, value, SourceUnknown)
+}
+
+// SetDefault wraps Viper for concurrent access
+func (c *safeConfig) SetDefault(key string, value interface{}) {
+ c.Lock()
+ defer c.Unlock()
+ c.configSources[SourceDefault].Set(key, value)
+ c.Viper.SetDefault(key, value)
+}
+
+// UnsetForSource wraps Viper for concurrent access
+func (c *safeConfig) UnsetForSource(key string, source Source) {
+ c.Lock()
+ defer c.Unlock()
+ c.configSources[source].Set(key, nil)
+ c.mergeViperInstances(key)
+}
+
+// mergeViperInstances is called after a change in an instance of Viper
+// to recompute the state of the main Viper
+// (it must be used with a lock to prevent concurrent access to Viper)
+func (c *safeConfig) mergeViperInstances(key string) {
+ var val interface{}
+ for _, source := range sources {
+ if currVal := c.configSources[source].Get(key); currVal != nil {
+ val = currVal
+ }
+ }
+ c.Viper.Set(key, val)
+}
+
+// SetKnown adds a key to the set of known valid config keys
+func (c *safeConfig) SetKnown(key string) {
+ c.Lock()
+ defer c.Unlock()
+ c.Viper.SetKnown(key)
+}
+
+// IsKnown returns whether a key is known
+func (c *safeConfig) IsKnown(key string) bool {
+ c.RLock()
+ defer c.RUnlock()
+
+ return c.Viper.IsKnown(key)
+}
+
+// checkKnownKey checks if a key is known, and if not logs a warning
+// Only a single warning will be logged per unknown key.
+//
+// Must be called with the lock read-locked.
+// The lock can be released and re-locked.
+func (c *safeConfig) checkKnownKey(key string) {
+ if c.Viper.IsKnown(key) {
+ return
+ }
+
+ if _, ok := c.unknownKeys[key]; ok {
+ return
+ }
+
+ // need to write-lock to add the key to the unknownKeys map
+ c.RUnlock()
+ // but we need to have the lock in the same state (RLocked) at the end of the function
+ defer c.RLock()
+
+ c.Lock()
+ c.unknownKeys[key] = struct{}{}
+ c.Unlock()
+
+ // log without holding the lock
+ log.Warnf("config key %v is unknown", key)
+}
+
+// GetKnownKeysLowercased returns all the keys that meet at least one of these criteria:
+// 1) have a default, 2) have an environment variable bound or 3) have been SetKnown()
+// Note that it returns the keys lowercased.
+func (c *safeConfig) GetKnownKeysLowercased() map[string]interface{} {
+ c.RLock()
+ defer c.RUnlock()
+
+ // GetKnownKeysLowercased returns a fresh map, so the caller may do with it
+ // as they please without holding the lock.
+ return c.Viper.GetKnownKeys()
+}
+
+// SetEnvKeyTransformer allows defining a transformer function which decides
+// how an environment variable's value gets assigned to the key.
+func (c *safeConfig) SetEnvKeyTransformer(key string, fn func(string) interface{}) {
+ c.Lock()
+ defer c.Unlock()
+ c.Viper.SetEnvKeyTransformer(key, fn)
+}
+
+// SetFs wraps Viper for concurrent access
+func (c *safeConfig) SetFs(fs afero.Fs) {
+ c.Lock()
+ defer c.Unlock()
+ c.Viper.SetFs(fs)
+}
+
+// IsSet wraps Viper for concurrent access
+func (c *safeConfig) IsSet(key string) bool {
+ c.RLock()
+ defer c.RUnlock()
+ return c.Viper.IsSet(key)
+}
+
+// IsSetForSource wraps Viper for concurrent access
+func (c *safeConfig) IsSetForSource(key string, source Source) bool {
+ c.RLock()
+ defer c.RUnlock()
+ return c.configSources[source].IsSet(key)
+}
+
+// IsSectionSet checks if a section is set by checking if either it
+// or any of its subkeys is set.
+func (c *safeConfig) IsSectionSet(section string) bool {
+ // The section is considered set if any of the keys
+ // inside it is set.
+ // This is needed when keys within the section
+ // are set through env variables.
+
+ // Add trailing . to make sure we don't take into account unrelated
+ // settings, eg. IsSectionSet("section") shouldn't return true
+ // if "section_key" is set.
+ sectionPrefix := section + "."
+
+ for _, key := range c.AllKeysLowercased() {
+ if strings.HasPrefix(key, sectionPrefix) && c.IsSet(key) {
+ return true
+ }
+ }
+
+ // If none of the keys are set, the section is still considered as set
+ // if it has been explicitly set in the config.
+ return c.IsSet(section)
+}
+
+func (c *safeConfig) AllKeysLowercased() []string {
+ c.RLock()
+ defer c.RUnlock()
+ return c.Viper.AllKeys()
+}
+
+// Get wraps Viper for concurrent access
+func (c *safeConfig) Get(key string) interface{} {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return deepcopy.Copy(val)
+}
+
+// GetAllSources returns the value of a key for each source
+func (c *safeConfig) GetAllSources(key string) []ValueWithSource {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ vals := make([]ValueWithSource, len(sources))
+ for i, source := range sources {
+ vals[i] = ValueWithSource{
+ Source: source,
+ Value: deepcopy.Copy(c.configSources[source].Get(key)),
+ }
+ }
+ return vals
+}
+
+// GetString wraps Viper for concurrent access
+func (c *safeConfig) GetString(key string) string {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetStringE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return val
+}
+
+// GetBool wraps Viper for concurrent access
+func (c *safeConfig) GetBool(key string) bool {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetBoolE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return val
+}
+
+// GetInt wraps Viper for concurrent access
+func (c *safeConfig) GetInt(key string) int {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetIntE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return val
+}
+
+// GetInt32 wraps Viper for concurrent access
+func (c *safeConfig) GetInt32(key string) int32 {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetInt32E(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return val
+}
+
+// GetInt64 wraps Viper for concurrent access
+func (c *safeConfig) GetInt64(key string) int64 {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetInt64E(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return val
+}
+
+// GetFloat64 wraps Viper for concurrent access
+func (c *safeConfig) GetFloat64(key string) float64 {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetFloat64E(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return val
+}
+
+// GetTime wraps Viper for concurrent access
+func (c *safeConfig) GetTime(key string) time.Time {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetTimeE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return val
+}
+
+// GetDuration wraps Viper for concurrent access
+func (c *safeConfig) GetDuration(key string) time.Duration {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetDurationE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return val
+}
+
+// GetStringSlice wraps Viper for concurrent access
+func (c *safeConfig) GetStringSlice(key string) []string {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetStringSliceE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return slices.Clone(val)
+}
+
+// GetFloat64SliceE loads a key as a []float64
+func (c *safeConfig) GetFloat64SliceE(key string) ([]float64, error) {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+
+	// We're using GetStringSlice because viper can only parse lists of strings from env variables
+ list, err := c.Viper.GetStringSliceE(key)
+ if err != nil {
+ return nil, fmt.Errorf("'%v' is not a list", key)
+ }
+
+ res := []float64{}
+ for _, item := range list {
+ nb, err := strconv.ParseFloat(item, 64)
+ if err != nil {
+ return nil, fmt.Errorf("value '%v' from '%v' is not a float64", item, key)
+ }
+ res = append(res, nb)
+ }
+ return res, nil
+}
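As a rough usage sketch (hypothetical default value; any source that resolves the key to a slice of strings behaves the same way):

	cfg := NewConfig("test", "DD", strings.NewReplacer(".", "_"))
	cfg.SetDefault("histogram_percentiles", []string{"0.95", "0.99"})
	pcts, err := cfg.GetFloat64SliceE("histogram_percentiles") // pcts == []float64{0.95, 0.99}, err == nil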
+
+// GetStringMap wraps Viper for concurrent access
+func (c *safeConfig) GetStringMap(key string) map[string]interface{} {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetStringMapE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return deepcopy.Copy(val).(map[string]interface{})
+}
+
+// GetStringMapString wraps Viper for concurrent access
+func (c *safeConfig) GetStringMapString(key string) map[string]string {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetStringMapStringE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return deepcopy.Copy(val).(map[string]string)
+}
+
+// GetStringMapStringSlice wraps Viper for concurrent access
+func (c *safeConfig) GetStringMapStringSlice(key string) map[string][]string {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetStringMapStringSliceE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return deepcopy.Copy(val).(map[string][]string)
+}
+
+// GetSizeInBytes wraps Viper for concurrent access
+func (c *safeConfig) GetSizeInBytes(key string) uint {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ val, err := c.Viper.GetSizeInBytesE(key)
+ if err != nil {
+ log.Warnf("failed to get configuration value for key %q: %s", key, err)
+ }
+ return val
+}
+
+// GetSource returns the source that set the value for the given key, wrapping the per-source configs for concurrent access
+func (c *safeConfig) GetSource(key string) Source {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ var source Source
+ for _, s := range sources {
+ if c.configSources[s].Get(key) != nil {
+ source = s
+ }
+ }
+ return source
+}
+
+// SetEnvPrefix wraps Viper for concurrent access, and keeps the envPrefix for
+// future reference
+func (c *safeConfig) SetEnvPrefix(in string) {
+ c.Lock()
+ defer c.Unlock()
+ c.configSources[SourceEnvVar].SetEnvPrefix(in)
+ c.Viper.SetEnvPrefix(in)
+ c.envPrefix = in
+}
+
+// mergeWithEnvPrefix derives the environment variable that Viper will use for a given key.
+// mergeWithEnvPrefix must be called while holding the config lock (read or write).
+func (c *safeConfig) mergeWithEnvPrefix(key string) string {
+ return strings.Join([]string{c.envPrefix, strings.ToUpper(key)}, "_")
+}
+
+// BindEnv wraps Viper for concurrent access, and adds tracking of the configurable env vars
+func (c *safeConfig) BindEnv(input ...string) {
+ c.Lock()
+ defer c.Unlock()
+ var envKeys []string
+
+ // If one input is given, viper derives an env key from it; otherwise, all inputs after
+ // the first are literal env vars.
+ if len(input) == 1 {
+ envKeys = []string{c.mergeWithEnvPrefix(input[0])}
+ } else {
+ envKeys = input[1:]
+ }
+
+ for _, key := range envKeys {
+ // apply EnvKeyReplacer to each key
+ if c.envKeyReplacer != nil {
+ key = c.envKeyReplacer.Replace(key)
+ }
+ c.configEnvVars[key] = struct{}{}
+ }
+
+ _ = c.configSources[SourceEnvVar].BindEnv(input...)
+ _ = c.Viper.BindEnv(input...)
+}
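A short sketch of the two call shapes (hypothetical key, assuming the "DD" prefix and the "." to "_" replacer installed by NewConfig below):

	cfg := NewConfig("test", "DD", strings.NewReplacer(".", "_"))
	// One argument: the env var name is derived from the key, i.e.
	// "logs_config.batch_wait" -> "DD_LOGS_CONFIG_BATCH_WAIT" after the replacer.
	cfg.BindEnv("logs_config.batch_wait")
	// Additional arguments: everything after the key is treated as a literal env var name.
	cfg.BindEnv("logs_config.batch_wait", "DD_BATCH_WAIT", "DD_LOGS_BATCH_WAIT")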
+
+// SetEnvKeyReplacer wraps Viper for concurrent access
+func (c *safeConfig) SetEnvKeyReplacer(r *strings.Replacer) {
+ c.Lock()
+ defer c.Unlock()
+ c.configSources[SourceEnvVar].SetEnvKeyReplacer(r)
+ c.Viper.SetEnvKeyReplacer(r)
+ c.envKeyReplacer = r
+}
+
+// UnmarshalKey wraps Viper for concurrent access
+func (c *safeConfig) UnmarshalKey(key string, rawVal interface{}, opts ...viper.DecoderConfigOption) error {
+ c.RLock()
+ defer c.RUnlock()
+ c.checkKnownKey(key)
+ return c.Viper.UnmarshalKey(key, rawVal, opts...)
+}
+
+// Unmarshal wraps Viper for concurrent access
+func (c *safeConfig) Unmarshal(rawVal interface{}) error {
+ c.RLock()
+ defer c.RUnlock()
+ return c.Viper.Unmarshal(rawVal)
+}
+
+// UnmarshalExact wraps Viper for concurrent access
+func (c *safeConfig) UnmarshalExact(rawVal interface{}) error {
+ c.RLock()
+ defer c.RUnlock()
+ return c.Viper.UnmarshalExact(rawVal)
+}
+
+// ReadInConfig wraps Viper for concurrent access
+func (c *safeConfig) ReadInConfig() error {
+ c.Lock()
+ defer c.Unlock()
+ err := c.Viper.ReadInConfig()
+ if err != nil {
+ return err
+ }
+ return c.configSources[SourceFile].ReadInConfig()
+}
+
+// ReadConfig wraps Viper for concurrent access
+func (c *safeConfig) ReadConfig(in io.Reader) error {
+ c.Lock()
+ defer c.Unlock()
+ b, err := io.ReadAll(in)
+ if err != nil {
+ return err
+ }
+ err = c.Viper.ReadConfig(bytes.NewReader(b))
+ if err != nil {
+ return err
+ }
+ return c.configSources[SourceFile].ReadConfig(bytes.NewReader(b))
+}
+
+// MergeConfig wraps Viper for concurrent access
+func (c *safeConfig) MergeConfig(in io.Reader) error {
+ c.Lock()
+ defer c.Unlock()
+ return c.Viper.MergeConfig(in)
+}
+
+// MergeConfigMap merges the configuration from the map given with an existing config.
+// Note that the map given may be modified.
+func (c *safeConfig) MergeConfigMap(cfg map[string]any) error {
+ c.Lock()
+ defer c.Unlock()
+ return c.Viper.MergeConfigMap(cfg)
+}
+
+// AllSettings wraps Viper for concurrent access
+func (c *safeConfig) AllSettings() map[string]interface{} {
+ c.RLock()
+ defer c.RUnlock()
+
+ // AllSettings returns a fresh map, so the caller may do with it
+ // as they please without holding the lock.
+ return c.Viper.AllSettings()
+}
+
+// AllSettingsWithoutDefault wraps Viper for concurrent access
+func (c *safeConfig) AllSettingsWithoutDefault() map[string]interface{} {
+ c.RLock()
+ defer c.RUnlock()
+
+ // AllSettingsWithoutDefault returns a fresh map, so the caller may do with it
+ // as they please without holding the lock.
+ return c.Viper.AllSettingsWithoutDefault()
+}
+
+// AllSourceSettingsWithoutDefault wraps Viper for concurrent access
+func (c *safeConfig) AllSourceSettingsWithoutDefault(source Source) map[string]interface{} {
+ c.RLock()
+ defer c.RUnlock()
+
+ // AllSourceSettingsWithoutDefault returns a fresh map, so the caller may do with it
+ // as they please without holding the lock.
+ return c.configSources[source].AllSettingsWithoutDefault()
+}
+
+// AddConfigPath wraps Viper for concurrent access
+func (c *safeConfig) AddConfigPath(in string) {
+ c.Lock()
+ defer c.Unlock()
+ c.configSources[SourceFile].AddConfigPath(in)
+ c.Viper.AddConfigPath(in)
+}
+
+// SetConfigName wraps Viper for concurrent access
+func (c *safeConfig) SetConfigName(in string) {
+ c.Lock()
+ defer c.Unlock()
+ c.configSources[SourceFile].SetConfigName(in)
+ c.Viper.SetConfigName(in)
+}
+
+// SetConfigFile wraps Viper for concurrent access
+func (c *safeConfig) SetConfigFile(in string) {
+ c.Lock()
+ defer c.Unlock()
+ c.configSources[SourceFile].SetConfigFile(in)
+ c.Viper.SetConfigFile(in)
+}
+
+// SetConfigType wraps Viper for concurrent access
+func (c *safeConfig) SetConfigType(in string) {
+ c.Lock()
+ defer c.Unlock()
+ c.configSources[SourceFile].SetConfigType(in)
+ c.Viper.SetConfigType(in)
+}
+
+// ConfigFileUsed wraps Viper for concurrent access
+func (c *safeConfig) ConfigFileUsed() string {
+ c.RLock()
+ defer c.RUnlock()
+ return c.Viper.ConfigFileUsed()
+}
+
+func (c *safeConfig) SetTypeByDefaultValue(in bool) {
+ c.Lock()
+ defer c.Unlock()
+ for _, source := range sources {
+ c.configSources[source].SetTypeByDefaultValue(in)
+ }
+ c.Viper.SetTypeByDefaultValue(in)
+}
+
+// BindPFlag wraps Viper for concurrent access
+func (c *safeConfig) BindPFlag(key string, flag *pflag.Flag) error {
+ c.Lock()
+ defer c.Unlock()
+ return c.Viper.BindPFlag(key, flag)
+}
+
+// GetEnvVars implements the Config interface
+func (c *safeConfig) GetEnvVars() []string {
+ c.RLock()
+ defer c.RUnlock()
+ vars := make([]string, 0, len(c.configEnvVars))
+ for v := range c.configEnvVars {
+ vars = append(vars, v)
+ }
+ return vars
+}
+
+// BindEnvAndSetDefault implements the Config interface
+func (c *safeConfig) BindEnvAndSetDefault(key string, val interface{}, env ...string) {
+ c.SetDefault(key, val)
+ c.BindEnv(append([]string{key}, env...)...) //nolint:errcheck
+}
+
+func (c *safeConfig) Warnings() *Warnings {
+ return nil
+}
+
+func (c *safeConfig) Object() Reader {
+ return c
+}
+
+// NewConfig returns a new Config object.
+func NewConfig(name string, envPrefix string, envKeyReplacer *strings.Replacer) Config {
+ config := safeConfig{
+ Viper: viper.New(),
+ configSources: map[Source]*viper.Viper{},
+ configEnvVars: map[string]struct{}{},
+ unknownKeys: map[string]struct{}{},
+ }
+
+ // load one Viper instance per source of setting change
+ for _, source := range sources {
+ config.configSources[source] = viper.New()
+ }
+
+ config.SetTypeByDefaultValue(true)
+ config.SetConfigName(name)
+ config.SetEnvPrefix(envPrefix)
+ config.SetEnvKeyReplacer(envKeyReplacer)
+
+ return &config
+}
+
+// CopyConfig copies the given config to the receiver config. This should only be used in tests as replacing
+// the global config reference is unsafe.
+func (c *safeConfig) CopyConfig(cfg Config) {
+ c.Lock()
+ defer c.Unlock()
+
+ if cfg, ok := cfg.(*safeConfig); ok {
+ c.Viper = cfg.Viper
+ c.configSources = cfg.configSources
+ c.envPrefix = cfg.envPrefix
+ c.envKeyReplacer = cfg.envKeyReplacer
+ c.configEnvVars = cfg.configEnvVars
+ c.unknownKeys = cfg.unknownKeys
+ return
+ }
+ panic("Replacement config must be an instance of safeConfig")
+}
+
+// GetProxies returns the proxy settings from the configuration
+func (c *safeConfig) GetProxies() *Proxy {
+ c.proxiesOnce.Do(func() {
+ if c.GetBool("fips.enabled") {
+ return
+ }
+ if !c.IsSet("proxy.http") && !c.IsSet("proxy.https") && !c.IsSet("proxy.no_proxy") {
+ return
+ }
+ p := &Proxy{
+ HTTP: c.GetString("proxy.http"),
+ HTTPS: c.GetString("proxy.https"),
+ NoProxy: c.GetStringSlice("proxy.no_proxy"),
+ }
+
+ c.proxies = p
+ })
+ return c.proxies
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/model/warnings.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/warnings.go
new file mode 100644
index 0000000000..7c85d06b60
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/model/warnings.go
@@ -0,0 +1,12 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package model
+
+// Warnings represents the warnings in the config
+type Warnings struct {
+ TraceMallocEnabledWithPy2 bool
+ Err error
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/apm.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/apm.go
new file mode 100644
index 0000000000..026dfe1480
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/apm.go
@@ -0,0 +1,257 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package setup
+
+import (
+ "encoding/csv"
+ "encoding/json"
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// Traces specifies the data type used for Vector override. See https://vector.dev/docs/reference/configuration/sources/datadog_agent/ for additional details.
+const Traces DataType = "traces"
+
+func setupAPM(config pkgconfigmodel.Config) {
+ config.BindEnv("apm_config.obfuscation.elasticsearch.enabled", "DD_APM_OBFUSCATION_ELASTICSEARCH_ENABLED")
+ config.BindEnv("apm_config.obfuscation.elasticsearch.keep_values", "DD_APM_OBFUSCATION_ELASTICSEARCH_KEEP_VALUES")
+ config.BindEnv("apm_config.obfuscation.elasticsearch.obfuscate_sql_values", "DD_APM_OBFUSCATION_ELASTICSEARCH_OBFUSCATE_SQL_VALUES")
+ config.BindEnv("apm_config.obfuscation.mongodb.enabled", "DD_APM_OBFUSCATION_MONGODB_ENABLED")
+ config.BindEnv("apm_config.obfuscation.mongodb.keep_values", "DD_APM_OBFUSCATION_MONGODB_KEEP_VALUES")
+ config.BindEnv("apm_config.obfuscation.mongodb.obfuscate_sql_values", "DD_APM_OBFUSCATION_MONGODB_OBFUSCATE_SQL_VALUES")
+ config.BindEnv("apm_config.obfuscation.sql_exec_plan.enabled", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_ENABLED")
+ config.BindEnv("apm_config.obfuscation.sql_exec_plan.keep_values", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_KEEP_VALUES")
+ config.BindEnv("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_OBFUSCATE_SQL_VALUES")
+ config.BindEnv("apm_config.obfuscation.sql_exec_plan_normalize.enabled", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_ENABLED")
+ config.BindEnv("apm_config.obfuscation.sql_exec_plan_normalize.keep_values", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_KEEP_VALUES")
+ config.BindEnv("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_OBFUSCATE_SQL_VALUES")
+ config.BindEnv("apm_config.obfuscation.http.remove_query_string", "DD_APM_OBFUSCATION_HTTP_REMOVE_QUERY_STRING")
+ config.BindEnv("apm_config.obfuscation.http.remove_paths_with_digits", "DD_APM_OBFUSCATION_HTTP_REMOVE_PATHS_WITH_DIGITS")
+ config.BindEnv("apm_config.obfuscation.remove_stack_traces", "DD_APM_OBFUSCATION_REMOVE_STACK_TRACES")
+ config.BindEnv("apm_config.obfuscation.redis.enabled", "DD_APM_OBFUSCATION_REDIS_ENABLED")
+ config.BindEnv("apm_config.obfuscation.redis.remove_all_args", "DD_APM_OBFUSCATION_REDIS_REMOVE_ALL_ARGS")
+ config.BindEnv("apm_config.obfuscation.memcached.enabled", "DD_APM_OBFUSCATION_MEMCACHED_ENABLED")
+ config.BindEnv("apm_config.obfuscation.memcached.keep_command", "DD_APM_OBFUSCATION_MEMCACHED_KEEP_COMMAND")
+ config.SetKnown("apm_config.filter_tags.require")
+ config.SetKnown("apm_config.filter_tags.reject")
+ config.SetKnown("apm_config.filter_tags_regex.require")
+ config.SetKnown("apm_config.filter_tags_regex.reject")
+ config.SetKnown("apm_config.extra_sample_rate")
+ config.SetKnown("apm_config.dd_agent_bin")
+ config.SetKnown("apm_config.trace_writer.connection_limit")
+ config.SetKnown("apm_config.trace_writer.queue_size")
+ config.SetKnown("apm_config.service_writer.connection_limit")
+ config.SetKnown("apm_config.service_writer.queue_size")
+ config.SetKnown("apm_config.stats_writer.connection_limit")
+ config.SetKnown("apm_config.stats_writer.queue_size")
+ config.SetKnown("apm_config.analyzed_rate_by_service.*")
+ config.SetKnown("apm_config.bucket_size_seconds")
+ config.SetKnown("apm_config.watchdog_check_delay")
+ config.SetKnown("apm_config.sync_flushing")
+ config.SetKnown("apm_config.features")
+ config.SetKnown("apm_config.max_catalog_entries")
+
+ bindVectorOptions(config, Traces)
+
+ if runtime.GOARCH == "386" && runtime.GOOS == "windows" {
+		// on 32-bit Windows, the trace agent isn't installed. Set the default to disabled
+ // so that there aren't messages in the log about failing to start.
+ config.BindEnvAndSetDefault("apm_config.enabled", false, "DD_APM_ENABLED")
+ } else {
+ config.BindEnvAndSetDefault("apm_config.enabled", true, "DD_APM_ENABLED")
+ }
+
+ config.BindEnvAndSetDefault("apm_config.receiver_port", 8126, "DD_APM_RECEIVER_PORT", "DD_RECEIVER_PORT")
+ config.BindEnvAndSetDefault("apm_config.windows_pipe_buffer_size", 1_000_000, "DD_APM_WINDOWS_PIPE_BUFFER_SIZE") //nolint:errcheck
+ config.BindEnvAndSetDefault("apm_config.windows_pipe_security_descriptor", "D:AI(A;;GA;;;WD)", "DD_APM_WINDOWS_PIPE_SECURITY_DESCRIPTOR") //nolint:errcheck
+ config.BindEnvAndSetDefault("apm_config.remote_tagger", true, "DD_APM_REMOTE_TAGGER") //nolint:errcheck
+ config.BindEnvAndSetDefault("apm_config.peer_service_aggregation", false, "DD_APM_PEER_SERVICE_AGGREGATION") //nolint:errcheck
+ config.BindEnvAndSetDefault("apm_config.peer_tags_aggregation", false, "DD_APM_PEER_TAGS_AGGREGATION") //nolint:errcheck
+ config.BindEnvAndSetDefault("apm_config.compute_stats_by_span_kind", false, "DD_APM_COMPUTE_STATS_BY_SPAN_KIND") //nolint:errcheck
+ config.BindEnvAndSetDefault("apm_config.instrumentation.enabled", false, "DD_APM_INSTRUMENTATION_ENABLED")
+ config.BindEnvAndSetDefault("apm_config.instrumentation.enabled_namespaces", []string{}, "DD_APM_INSTRUMENTATION_ENABLED_NAMESPACES")
+ config.BindEnvAndSetDefault("apm_config.instrumentation.disabled_namespaces", []string{}, "DD_APM_INSTRUMENTATION_DISABLED_NAMESPACES")
+ config.BindEnvAndSetDefault("apm_config.instrumentation.lib_versions", map[string]string{}, "DD_APM_INSTRUMENTATION_LIB_VERSIONS")
+
+ config.BindEnv("apm_config.max_catalog_services", "DD_APM_MAX_CATALOG_SERVICES")
+ config.BindEnv("apm_config.receiver_timeout", "DD_APM_RECEIVER_TIMEOUT")
+ config.BindEnv("apm_config.max_payload_size", "DD_APM_MAX_PAYLOAD_SIZE")
+ config.BindEnv("apm_config.trace_buffer", "DD_APM_TRACE_BUFFER")
+ config.BindEnv("apm_config.decoders", "DD_APM_DECODERS")
+ config.BindEnv("apm_config.max_connections", "DD_APM_MAX_CONNECTIONS")
+ config.BindEnv("apm_config.decoder_timeout", "DD_APM_DECODER_TIMEOUT")
+ config.BindEnv("apm_config.log_file", "DD_APM_LOG_FILE")
+ config.BindEnv("apm_config.max_events_per_second", "DD_APM_MAX_EPS", "DD_MAX_EPS")
+ config.BindEnv("apm_config.max_traces_per_second", "DD_APM_MAX_TPS", "DD_MAX_TPS")
+ config.BindEnv("apm_config.errors_per_second", "DD_APM_ERROR_TPS")
+ config.BindEnv("apm_config.enable_rare_sampler", "DD_APM_ENABLE_RARE_SAMPLER")
+ config.BindEnv("apm_config.disable_rare_sampler", "DD_APM_DISABLE_RARE_SAMPLER") //Deprecated
+ config.BindEnv("apm_config.max_remote_traces_per_second", "DD_APM_MAX_REMOTE_TPS")
+ config.BindEnv("apm_config.probabilistic_sampler.enabled", "DD_APM_PROBABILISTIC_SAMPLER_ENABLED")
+ config.BindEnv("apm_config.probabilistic_sampler.sampling_percentage", "DD_APM_PROBABILISTIC_SAMPLER_SAMPLING_PERCENTAGE")
+ config.BindEnv("apm_config.probabilistic_sampler.hash_seed", "DD_APM_PROBABILISTIC_SAMPLER_HASH_SEED")
+
+ config.BindEnv("apm_config.max_memory", "DD_APM_MAX_MEMORY")
+ config.BindEnv("apm_config.max_cpu_percent", "DD_APM_MAX_CPU_PERCENT")
+ config.BindEnv("apm_config.env", "DD_APM_ENV")
+ config.BindEnv("apm_config.apm_non_local_traffic", "DD_APM_NON_LOCAL_TRAFFIC")
+ config.BindEnv("apm_config.apm_dd_url", "DD_APM_DD_URL")
+ config.BindEnv("apm_config.connection_limit", "DD_APM_CONNECTION_LIMIT", "DD_CONNECTION_LIMIT")
+ config.BindEnv("apm_config.connection_reset_interval", "DD_APM_CONNECTION_RESET_INTERVAL")
+ config.BindEnv("apm_config.max_sender_retries", "DD_APM_MAX_SENDER_RETRIES")
+ config.BindEnv("apm_config.profiling_dd_url", "DD_APM_PROFILING_DD_URL")
+ config.BindEnv("apm_config.profiling_additional_endpoints", "DD_APM_PROFILING_ADDITIONAL_ENDPOINTS")
+ config.BindEnv("apm_config.additional_endpoints", "DD_APM_ADDITIONAL_ENDPOINTS")
+ config.BindEnv("apm_config.replace_tags", "DD_APM_REPLACE_TAGS")
+ config.BindEnv("apm_config.analyzed_spans", "DD_APM_ANALYZED_SPANS")
+ config.BindEnv("apm_config.ignore_resources", "DD_APM_IGNORE_RESOURCES", "DD_IGNORE_RESOURCE")
+ config.BindEnv("apm_config.receiver_socket", "DD_APM_RECEIVER_SOCKET")
+ config.BindEnv("apm_config.windows_pipe_name", "DD_APM_WINDOWS_PIPE_NAME")
+ config.BindEnv("apm_config.sync_flushing", "DD_APM_SYNC_FLUSHING")
+ config.BindEnv("apm_config.filter_tags.require", "DD_APM_FILTER_TAGS_REQUIRE")
+ config.BindEnv("apm_config.filter_tags.reject", "DD_APM_FILTER_TAGS_REJECT")
+ config.BindEnv("apm_config.filter_tags_regex.reject", "DD_APM_FILTER_TAGS_REGEX_REJECT")
+ config.BindEnv("apm_config.filter_tags_regex.require", "DD_APM_FILTER_TAGS_REGEX_REQUIRE")
+ config.BindEnv("apm_config.internal_profiling.enabled", "DD_APM_INTERNAL_PROFILING_ENABLED")
+ config.BindEnv("apm_config.debugger_dd_url", "DD_APM_DEBUGGER_DD_URL")
+ config.BindEnv("apm_config.debugger_api_key", "DD_APM_DEBUGGER_API_KEY")
+ config.BindEnv("apm_config.debugger_additional_endpoints", "DD_APM_DEBUGGER_ADDITIONAL_ENDPOINTS")
+ config.BindEnv("apm_config.debugger_diagnostics_dd_url", "DD_APM_DEBUGGER_DIAGNOSTICS_DD_URL")
+ config.BindEnv("apm_config.debugger_diagnostics_api_key", "DD_APM_DEBUGGER_DIAGNOSTICS_API_KEY")
+ config.BindEnv("apm_config.debugger_diagnostics_additional_endpoints", "DD_APM_DEBUGGER_DIAGNOSTICS_ADDITIONAL_ENDPOINTS")
+ config.BindEnv("apm_config.symdb_dd_url", "DD_APM_SYMDB_DD_URL")
+ config.BindEnv("apm_config.symdb_api_key", "DD_APM_SYMDB_API_KEY")
+ config.BindEnv("apm_config.symdb_additional_endpoints", "DD_APM_SYMDB_ADDITIONAL_ENDPOINTS")
+ config.BindEnvAndSetDefault("apm_config.telemetry.enabled", true, "DD_APM_TELEMETRY_ENABLED")
+ config.BindEnv("apm_config.telemetry.dd_url", "DD_APM_TELEMETRY_DD_URL")
+ config.BindEnv("apm_config.telemetry.additional_endpoints", "DD_APM_TELEMETRY_ADDITIONAL_ENDPOINTS")
+ config.BindEnv("apm_config.install_id", "DD_INSTRUMENTATION_INSTALL_ID")
+ config.BindEnv("apm_config.install_type", "DD_INSTRUMENTATION_INSTALL_TYPE")
+ config.BindEnv("apm_config.install_time", "DD_INSTRUMENTATION_INSTALL_TIME")
+ config.BindEnv("apm_config.obfuscation.credit_cards.enabled", "DD_APM_OBFUSCATION_CREDIT_CARDS_ENABLED")
+ config.BindEnv("apm_config.obfuscation.credit_cards.luhn", "DD_APM_OBFUSCATION_CREDIT_CARDS_LUHN")
+ config.BindEnvAndSetDefault("apm_config.debug.port", 5012, "DD_APM_DEBUG_PORT")
+ config.BindEnv("apm_config.features", "DD_APM_FEATURES")
+ config.SetEnvKeyTransformer("apm_config.features", func(s string) interface{} {
+ // Either commas or spaces can be used as separators.
+ // Comma takes precedence as it was the only supported separator in the past.
+ // Mixing separators is not supported.
+ var res []string
+ if strings.ContainsRune(s, ',') {
+ res = strings.Split(s, ",")
+ } else {
+ res = strings.Split(s, " ")
+ }
+ for i, v := range res {
+ res[i] = strings.TrimSpace(v)
+ }
+ return res
+ })
+
+ config.SetEnvKeyTransformer("apm_config.ignore_resources", func(in string) interface{} {
+ r, err := splitCSVString(in, ',')
+ if err != nil {
+ log.Warnf(`"apm_config.ignore_resources" can not be parsed: %v`, err)
+ return []string{}
+ }
+ return r
+ })
+
+ config.SetEnvKeyTransformer("apm_config.filter_tags.require", parseKVList("apm_config.filter_tags.require"))
+
+ config.SetEnvKeyTransformer("apm_config.filter_tags.reject", parseKVList("apm_config.filter_tags.reject"))
+
+ config.SetEnvKeyTransformer("apm_config.filter_tags_regex.require", parseKVList("apm_config.filter_tags_regex.require"))
+
+ config.SetEnvKeyTransformer("apm_config.filter_tags_regex.reject", parseKVList("apm_config.filter_tags_regex.reject"))
+
+ config.SetEnvKeyTransformer("apm_config.replace_tags", func(in string) interface{} {
+ var out []map[string]string
+ if err := json.Unmarshal([]byte(in), &out); err != nil {
+ log.Warnf(`"apm_config.replace_tags" can not be parsed: %v`, err)
+ }
+ return out
+ })
+
+ config.SetEnvKeyTransformer("apm_config.analyzed_spans", func(in string) interface{} {
+ out, err := parseAnalyzedSpans(in)
+ if err != nil {
+			log.Errorf(`Bad format for "apm_config.analyzed_spans", it should be of the form \"service_name|operation_name=rate,other_service|other_operation=rate\", error: %v`, err)
+ }
+ return out
+ })
+
+ config.BindEnv("apm_config.peer_tags", "DD_APM_PEER_TAGS")
+ config.SetEnvKeyTransformer("apm_config.peer_tags", func(in string) interface{} {
+ var out []string
+ if err := json.Unmarshal([]byte(in), &out); err != nil {
+ log.Warnf(`"apm_config.peer_tags" can not be parsed: %v`, err)
+ }
+ return out
+ })
+}
+
+func parseKVList(key string) func(string) interface{} {
+ return func(in string) interface{} {
+ if len(in) == 0 {
+ return []string{}
+ }
+ if in[0] != '[' {
+ return strings.Split(in, " ")
+ }
+ // '[' as a first character signals JSON array format
+ var values []string
+ if err := json.Unmarshal([]byte(in), &values); err != nil {
+ log.Warnf(`"%s" can not be parsed: %v`, key, err)
+ return []string{}
+ }
+ return values
+ }
+}
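Concretely, both accepted shapes decode to the same slice (illustrative values):

	parse := parseKVList("apm_config.filter_tags.require")
	parse("env:prod version:1.2")        // []string{"env:prod", "version:1.2"}
	parse(`["env:prod", "version:1.2"]`) // same result, via the JSON-array form
	parse("")                            // []string{}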
+
+func splitCSVString(s string, sep rune) ([]string, error) {
+ r := csv.NewReader(strings.NewReader(s))
+ r.TrimLeadingSpace = true
+ r.LazyQuotes = true
+ r.Comma = sep
+
+ return r.Read()
+}
+
+func parseNameAndRate(token string) (string, float64, error) {
+ parts := strings.Split(token, "=")
+ if len(parts) != 2 {
+		return "", 0, fmt.Errorf("bad format")
+ }
+ rate, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+		return "", 0, fmt.Errorf("unable to parse rate")
+ }
+ return parts[0], rate, nil
+}
+
+// parseAnalyzedSpans parses the env string to extract a map of spans to be analyzed, keyed by service and operation.
+// The format is: service_name|operation_name=rate,other_service|other_operation=rate
+func parseAnalyzedSpans(env string) (analyzedSpans map[string]interface{}, err error) {
+ analyzedSpans = make(map[string]interface{})
+ if env == "" {
+ return
+ }
+ tokens := strings.Split(env, ",")
+ for _, token := range tokens {
+ name, rate, err := parseNameAndRate(token)
+ if err != nil {
+ return nil, err
+ }
+ analyzedSpans[name] = rate
+ }
+ return
+}
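For example (hypothetical service and operation names):

	spans, err := parseAnalyzedSpans("db_service|db.query=1,web_service|http.request=0.5")
	// err == nil; spans == map[string]interface{}{
	//     "db_service|db.query":      1.0,
	//     "web_service|http.request": 0.5,
	// }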
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config.go
new file mode 100644
index 0000000000..dc3e50d8c6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config.go
@@ -0,0 +1,2307 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package setup defines the configuration of the agent
+package setup
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ "gopkg.in/yaml.v2"
+
+ "github.com/DataDog/datadog-agent/comp/core/secrets"
+ "github.com/DataDog/datadog-agent/pkg/collector/check/defaults"
+ pkgconfigenv "github.com/DataDog/datadog-agent/pkg/config/env"
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/hostname/validate"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/optional"
+ "github.com/DataDog/datadog-agent/pkg/util/system"
+)
+
+const (
+
+ // DefaultSite is the default site the Agent sends data to.
+ DefaultSite = "datadoghq.com"
+
+	// DefaultNumWorkers is the default number of workers for our check runner
+ DefaultNumWorkers = 4
+	// MaxNumWorkers is the maximum number of workers for our check runner
+ MaxNumWorkers = 25
+	// DefaultAPIKeyValidationInterval is the default interval of API key validation checks
+ DefaultAPIKeyValidationInterval = 60
+
+ // DefaultForwarderRecoveryInterval is the default recovery interval,
+ // also used if the user-provided value is invalid.
+ DefaultForwarderRecoveryInterval = 2
+
+ megaByte = 1024 * 1024
+
+	// DefaultBatchWait is the default HTTP batch wait, in seconds, for logs
+ DefaultBatchWait = 5
+
+ // DefaultBatchMaxConcurrentSend is the default HTTP batch max concurrent send for logs
+ DefaultBatchMaxConcurrentSend = 0
+
+ // DefaultBatchMaxSize is the default HTTP batch max size (maximum number of events in a single batch) for logs
+ DefaultBatchMaxSize = 1000
+
+ // DefaultInputChanSize is the default input chan size for events
+ DefaultInputChanSize = 100
+
+ // DefaultBatchMaxContentSize is the default HTTP batch max content size (before compression) for logs
+ // It is also the maximum possible size of a single event. Events exceeding this limit are dropped.
+ DefaultBatchMaxContentSize = 5000000
+
+ // DefaultAuditorTTL is the default logs auditor TTL in hours
+ DefaultAuditorTTL = 23
+
+ // DefaultRuntimePoliciesDir is the default policies directory used by the runtime security module
+ DefaultRuntimePoliciesDir = "/etc/datadog-agent/runtime-security.d"
+
+ // DefaultCompressorKind is the default compressor. Options available are 'zlib' and 'zstd'
+ DefaultCompressorKind = "zlib"
+
+ // DefaultLogsSenderBackoffFactor is the default logs sender backoff randomness factor
+ DefaultLogsSenderBackoffFactor = 2.0
+
+ // DefaultLogsSenderBackoffBase is the default logs sender base backoff time, seconds
+ DefaultLogsSenderBackoffBase = 1.0
+
+ // DefaultLogsSenderBackoffMax is the default logs sender maximum backoff time, seconds
+ DefaultLogsSenderBackoffMax = 120.0
+
+ // DefaultLogsSenderBackoffRecoveryInterval is the default logs sender backoff recovery interval
+ DefaultLogsSenderBackoffRecoveryInterval = 2
+
+ // maxExternalMetricsProviderChunkSize ensures batch queries are limited in size.
+ maxExternalMetricsProviderChunkSize = 35
+
+ // DefaultLocalProcessCollectorInterval is the interval at which processes are collected and sent to the workloadmeta
+ // in the core agent if the process check is disabled.
+ DefaultLocalProcessCollectorInterval = 1 * time.Minute
+
+ // DefaultMaxMessageSizeBytes is the default value for max_message_size_bytes
+ // If a log message is larger than this byte limit, the overflow bytes will be truncated.
+ DefaultMaxMessageSizeBytes = 256 * 1000
+)
+
+// Datadog and SystemProbe are the global configuration objects
+var (
+ Datadog pkgconfigmodel.Config
+ SystemProbe pkgconfigmodel.Config
+)
+
+// Variables to initialize at build time
+var (
+ DefaultPython string
+
+ // ForceDefaultPython has its value set to true at compile time if we should ignore
+ // the Python version set in the configuration and use `DefaultPython` instead.
+ // We use this to force Python 3 in the Agent 7 as it's the only one available.
+ ForceDefaultPython string
+)
+
+// Variables to initialize at start time
+var (
+ // StartTime is the agent startup time
+ StartTime = time.Now()
+
+ // DefaultSecurityProfilesDir is the default directory used to store Security Profiles by the runtime security module
+ DefaultSecurityProfilesDir = filepath.Join(defaultRunPath, "runtime-security", "profiles")
+)
+
+// List of integrations allowed to be configured by RC by default
+var defaultAllowedRCIntegrations = []string{}
+
+// ConfigurationProviders helps unmarshal the `config_providers` config param
+type ConfigurationProviders struct {
+ Name string `mapstructure:"name"`
+ Polling bool `mapstructure:"polling"`
+ PollInterval string `mapstructure:"poll_interval"`
+ TemplateURL string `mapstructure:"template_url"`
+ TemplateDir string `mapstructure:"template_dir"`
+ Username string `mapstructure:"username"`
+ Password string `mapstructure:"password"`
+ CAFile string `mapstructure:"ca_file"`
+ CAPath string `mapstructure:"ca_path"`
+ CertFile string `mapstructure:"cert_file"`
+ KeyFile string `mapstructure:"key_file"`
+ Token string `mapstructure:"token"`
+ GraceTimeSeconds int `mapstructure:"grace_time_seconds"`
+ DegradedDeadlineMinutes int `mapstructure:"degraded_deadline_minutes"`
+}
+
+// Listeners helps unmarshal the `listeners` config param
+type Listeners struct {
+ Name string `mapstructure:"name"`
+ EnabledProviders map[string]struct{}
+}
+
+// SetEnabledProviders registers the enabled config providers in the listener config
+func (l *Listeners) SetEnabledProviders(ep map[string]struct{}) {
+ l.EnabledProviders = ep
+}
+
+// IsProviderEnabled returns whether a config provider is enabled
+func (l *Listeners) IsProviderEnabled(provider string) bool {
+ _, found := l.EnabledProviders[provider]
+
+ return found
+}
+
+// MappingProfile represents a group of mappings
+type MappingProfile struct {
+ Name string `mapstructure:"name" json:"name" yaml:"name"`
+ Prefix string `mapstructure:"prefix" json:"prefix" yaml:"prefix"`
+ Mappings []MetricMapping `mapstructure:"mappings" json:"mappings" yaml:"mappings"`
+}
+
+// MetricMapping represents one mapping rule
+type MetricMapping struct {
+ Match string `mapstructure:"match" json:"match" yaml:"match"`
+ MatchType string `mapstructure:"match_type" json:"match_type" yaml:"match_type"`
+ Name string `mapstructure:"name" json:"name" yaml:"name"`
+ Tags map[string]string `mapstructure:"tags" json:"tags" yaml:"tags"`
+}
+
+// DataType represents the generic data type (e.g. metrics, logs) that can be sent by the Agent
+type DataType string
+
+const (
+ // Metrics type covers series & sketches
+ Metrics DataType = "metrics"
+ // Logs type covers all outgoing logs
+ Logs DataType = "logs"
+)
+
+func init() {
+ osinit()
+ // Configure Datadog global configuration
+ Datadog = pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))
+ SystemProbe = pkgconfigmodel.NewConfig("system-probe", "DD", strings.NewReplacer(".", "_"))
+ // Configuration defaults
+ InitConfig(Datadog)
+ InitSystemProbeConfig(SystemProbe)
+}
+
+// InitConfig initializes the config defaults on a config
+func InitConfig(config pkgconfigmodel.Config) {
+ // Agent
+ // Don't set a default on 'site' to allow detecting with viper whether it's set in config
+ config.BindEnv("site")
+ config.BindEnv("dd_url", "DD_DD_URL", "DD_URL")
+ config.BindEnvAndSetDefault("app_key", "")
+ config.BindEnvAndSetDefault("cloud_provider_metadata", []string{"aws", "gcp", "azure", "alibaba", "oracle", "ibm"})
+ config.SetDefault("proxy", nil)
+ config.BindEnvAndSetDefault("skip_ssl_validation", false)
+ config.BindEnvAndSetDefault("sslkeylogfile", "")
+ config.BindEnv("tls_handshake_timeout")
+ config.BindEnvAndSetDefault("hostname", "")
+ config.BindEnvAndSetDefault("hostname_file", "")
+ config.BindEnvAndSetDefault("tags", []string{})
+ config.BindEnvAndSetDefault("extra_tags", []string{})
+ // If enabled, all origin detection mechanisms will be unified to use the same logic.
+	// It will override all other origin detection settings in favor of the unified one.
+ config.BindEnvAndSetDefault("origin_detection_unified", false)
+ config.BindEnv("env")
+ config.BindEnvAndSetDefault("tag_value_split_separator", map[string]string{})
+ config.BindEnvAndSetDefault("conf_path", ".")
+ config.BindEnvAndSetDefault("confd_path", defaultConfdPath)
+ config.BindEnvAndSetDefault("additional_checksd", defaultAdditionalChecksPath)
+ config.BindEnvAndSetDefault("jmx_log_file", "")
+ // If enabling log_payloads, ensure the log level is set to at least DEBUG to be able to see the logs
+ config.BindEnvAndSetDefault("log_payloads", false)
+ config.BindEnvAndSetDefault("log_file", "")
+ config.BindEnvAndSetDefault("log_file_max_size", "10Mb")
+ config.BindEnvAndSetDefault("log_file_max_rolls", 1)
+ config.BindEnvAndSetDefault("log_level", "info")
+ config.BindEnvAndSetDefault("log_to_syslog", false)
+ config.BindEnvAndSetDefault("log_to_console", true)
+ config.BindEnvAndSetDefault("log_format_rfc3339", false)
+ config.BindEnvAndSetDefault("log_all_goroutines_when_unhealthy", false)
+ config.BindEnvAndSetDefault("logging_frequency", int64(500))
+ config.BindEnvAndSetDefault("disable_file_logging", false)
+ config.BindEnvAndSetDefault("syslog_uri", "")
+ config.BindEnvAndSetDefault("syslog_rfc", false)
+ config.BindEnvAndSetDefault("syslog_pem", "")
+ config.BindEnvAndSetDefault("syslog_key", "")
+ config.BindEnvAndSetDefault("syslog_tls_verify", true)
+ config.BindEnv("ipc_address") // deprecated: use `cmd_host` instead
+ config.BindEnvAndSetDefault("cmd_host", "localhost")
+ config.BindEnvAndSetDefault("cmd_port", 5001)
+ config.BindEnvAndSetDefault("agent_ipc.host", "localhost")
+ config.BindEnvAndSetDefault("agent_ipc.port", 0)
+ config.BindEnvAndSetDefault("agent_ipc.config_refresh_interval", 0)
+ config.BindEnvAndSetDefault("default_integration_http_timeout", 9)
+ config.BindEnvAndSetDefault("integration_tracing", false)
+ config.BindEnvAndSetDefault("integration_tracing_exhaustive", false)
+ config.BindEnvAndSetDefault("integration_profiling", false)
+ config.BindEnvAndSetDefault("integration_check_status_enabled", false)
+ config.BindEnvAndSetDefault("enable_metadata_collection", true)
+ config.BindEnvAndSetDefault("enable_gohai", true)
+ config.BindEnvAndSetDefault("enable_signing_metadata_collection", true)
+ config.BindEnvAndSetDefault("check_runners", int64(4))
+ config.BindEnvAndSetDefault("check_cancel_timeout", 500*time.Millisecond)
+ config.BindEnvAndSetDefault("auth_token_file_path", "")
+ config.BindEnv("bind_host")
+ config.BindEnvAndSetDefault("health_port", int64(0))
+ config.BindEnvAndSetDefault("disable_py3_validation", false)
+ config.BindEnvAndSetDefault("python_version", DefaultPython)
+ config.BindEnvAndSetDefault("win_skip_com_init", false)
+ config.BindEnvAndSetDefault("allow_arbitrary_tags", false)
+ config.BindEnvAndSetDefault("use_proxy_for_cloud_metadata", false)
+ config.BindEnvAndSetDefault("remote_tagger_timeout_seconds", 30)
+
+ // Fips
+ config.BindEnvAndSetDefault("fips.enabled", false)
+ config.BindEnvAndSetDefault("fips.port_range_start", 9803)
+ config.BindEnvAndSetDefault("fips.local_address", "localhost")
+ config.BindEnvAndSetDefault("fips.https", true)
+ config.BindEnvAndSetDefault("fips.tls_verify", true)
+
+ // Remote config
+ config.BindEnvAndSetDefault("remote_configuration.enabled", true)
+ config.BindEnvAndSetDefault("remote_configuration.key", "")
+ config.BindEnv("remote_configuration.api_key")
+ config.BindEnv("remote_configuration.rc_dd_url")
+ config.BindEnvAndSetDefault("remote_configuration.no_tls", false)
+ config.BindEnvAndSetDefault("remote_configuration.no_tls_validation", false)
+ config.BindEnvAndSetDefault("remote_configuration.config_root", "")
+ config.BindEnvAndSetDefault("remote_configuration.director_root", "")
+ config.BindEnv("remote_configuration.refresh_interval")
+ config.BindEnvAndSetDefault("remote_configuration.max_backoff_interval", 5*time.Minute)
+ config.BindEnvAndSetDefault("remote_configuration.clients.ttl_seconds", 30*time.Second)
+ config.BindEnvAndSetDefault("remote_configuration.clients.cache_bypass_limit", 5)
+ // Remote config products
+ config.BindEnvAndSetDefault("remote_configuration.apm_sampling.enabled", true)
+ config.BindEnvAndSetDefault("remote_configuration.agent_integrations.enabled", false)
+ config.BindEnvAndSetDefault("remote_configuration.agent_integrations.allow_list", defaultAllowedRCIntegrations)
+ config.BindEnvAndSetDefault("remote_configuration.agent_integrations.block_list", []string{})
+ config.BindEnvAndSetDefault("remote_configuration.agent_integrations.allow_log_config_scheduling", false)
+
+ // Auto exit configuration
+ config.BindEnvAndSetDefault("auto_exit.validation_period", 60)
+ config.BindEnvAndSetDefault("auto_exit.noprocess.enabled", false)
+ config.BindEnvAndSetDefault("auto_exit.noprocess.excluded_processes", []string{})
+
+	// The number of commits before expiring a context. The value is 2 to handle
+	// the case where a check misses sending a metric.
+ config.BindEnvAndSetDefault("check_sampler_bucket_commits_count_expiry", 2)
+	// The number of seconds before removing stateful metric data after expiring a
+	// context. Default is 25h, to minimise problems for checks that emit metrics
+	// only occasionally.
+ config.BindEnvAndSetDefault("check_sampler_stateful_metric_expiration_time", 25*time.Hour)
+ config.BindEnvAndSetDefault("check_sampler_expire_metrics", true)
+ config.BindEnvAndSetDefault("check_sampler_context_metrics", false)
+ config.BindEnvAndSetDefault("host_aliases", []string{})
+
+ // overridden in IoT Agent main
+ config.BindEnvAndSetDefault("iot_host", false)
+ // overridden in Heroku buildpack
+ config.BindEnvAndSetDefault("heroku_dyno", false)
+
+ // Debugging + C-land crash feature flags
+ config.BindEnvAndSetDefault("c_stacktrace_collection", false)
+ config.BindEnvAndSetDefault("c_core_dump", false)
+ config.BindEnvAndSetDefault("go_core_dump", false)
+ config.BindEnvAndSetDefault("memtrack_enabled", true)
+ config.BindEnvAndSetDefault("tracemalloc_debug", false)
+ config.BindEnvAndSetDefault("tracemalloc_include", "")
+ config.BindEnvAndSetDefault("tracemalloc_exclude", "")
+ config.BindEnvAndSetDefault("tracemalloc_whitelist", "") // deprecated
+ config.BindEnvAndSetDefault("tracemalloc_blacklist", "") // deprecated
+ config.BindEnvAndSetDefault("run_path", defaultRunPath)
+ config.BindEnv("no_proxy_nonexact_match")
+
+	// Python 3 linter timeout, in seconds
+	// NOTE: the linter is notoriously slow; in the absence of a better solution we
+	// can only increase this timeout value. The linting operation is async.
+ config.BindEnvAndSetDefault("python3_linter_timeout", 120)
+
+ // Whether to honour the value of PYTHONPATH, if set, on Windows. On other OSes we always do.
+ config.BindEnvAndSetDefault("windows_use_pythonpath", false)
+
+ // When the Python full interpreter path cannot be deduced via heuristics, the agent
+ // is expected to prevent rtloader from initializing. When set to true, this override
+ // allows us to proceed but with some capabilities unavailable (e.g. `multiprocessing`
+ // library support will not work reliably in those environments)
+ config.BindEnvAndSetDefault("allow_python_path_heuristics_failure", false)
+
+	// if/when the default is changed to true, make the default platform
+	// dependent; the default should remain false on Windows to maintain backward
+	// compatibility with Agent5 behavior on Windows
+ config.BindEnvAndSetDefault("hostname_fqdn", false)
+
+	// When enabled, a hostname defined in the configuration (datadog.yaml) and starting with `ip-` or `domu` on EC2 is used as
+	// the canonical hostname; otherwise the instance-id is used as the canonical hostname.
+ config.BindEnvAndSetDefault("hostname_force_config_as_canonical", false)
+
+ // By default the Agent does not trust the hostname value retrieved from non-root UTS namespace.
+ // When enabled, the Agent will trust the value retrieved from non-root UTS namespace instead of failing
+ // hostname resolution.
+ // (Linux only)
+ config.BindEnvAndSetDefault("hostname_trust_uts_namespace", false)
+
+ config.BindEnvAndSetDefault("cluster_name", "")
+ config.BindEnvAndSetDefault("disable_cluster_name_tag_key", false)
+ config.BindEnvAndSetDefault("enabled_rfc1123_compliant_cluster_name_tag", true)
+
+ // secrets backend
+ config.BindEnvAndSetDefault("secret_backend_command", "")
+ config.BindEnvAndSetDefault("secret_backend_arguments", []string{})
+ config.BindEnvAndSetDefault("secret_backend_output_max_size", 0)
+ config.BindEnvAndSetDefault("secret_backend_timeout", 0)
+ config.BindEnvAndSetDefault("secret_backend_command_allow_group_exec_perm", false)
+ config.BindEnvAndSetDefault("secret_backend_skip_checks", false)
+ config.BindEnvAndSetDefault("secret_backend_remove_trailing_line_break", false)
+ config.BindEnvAndSetDefault("secret_refresh_interval", 0)
+ config.SetDefault("secret_audit_file_max_size", 0)
+
+	// Used to output logs in JSON format
+ config.BindEnvAndSetDefault("log_format_json", false)
+
+ // IPC API server timeout
+ config.BindEnvAndSetDefault("server_timeout", 30)
+
+ // Configuration for TLS for outgoing connections
+ config.BindEnvAndSetDefault("min_tls_version", "tlsv1.2")
+
+ // Defaults to safe YAML methods in base and custom checks.
+ config.BindEnvAndSetDefault("disable_unsafe_yaml", true)
+
+	// YAML keys whose values are stripped from the flare
+ config.BindEnvAndSetDefault("flare_stripped_keys", []string{})
+ config.BindEnvAndSetDefault("scrubber.additional_keys", []string{})
+
+ // Agent GUI access port
+ config.BindEnvAndSetDefault("GUI_port", defaultGuiPort)
+
+ if pkgconfigenv.IsContainerized() {
+ // In serverless-containerized environments (e.g. Fargate)
+ // it's impossible to mount host volumes.
+ // Make sure the host paths exist before setting up the default values.
+ // Fall back to the container paths if the host paths aren't mounted.
+ if pathExists("/host/proc") {
+ config.SetDefault("procfs_path", "/host/proc")
+ config.SetDefault("container_proc_root", "/host/proc")
+
+ // Used by some libraries (like gopsutil)
+ if v := os.Getenv("HOST_PROC"); v == "" {
+ os.Setenv("HOST_PROC", "/host/proc")
+ }
+ } else {
+ config.SetDefault("procfs_path", "/proc")
+ config.SetDefault("container_proc_root", "/proc")
+ }
+ if pathExists("/host/sys/fs/cgroup/") {
+ config.SetDefault("container_cgroup_root", "/host/sys/fs/cgroup/")
+ } else {
+ config.SetDefault("container_cgroup_root", "/sys/fs/cgroup/")
+ }
+ } else {
+ config.SetDefault("container_proc_root", "/proc")
+ // for Amazon Linux the cgroup directory on the host is /cgroup/
+ // we check memory.stat to make sure it exists and is not empty
+ if _, err := os.Stat("/cgroup/memory/memory.stat"); !os.IsNotExist(err) {
+ config.SetDefault("container_cgroup_root", "/cgroup/")
+ } else {
+ config.SetDefault("container_cgroup_root", "/sys/fs/cgroup/")
+ }
+ }
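+ // pathExists is a helper defined elsewhere in this package; a minimal sketch of what
+ // such a helper might look like, assuming it only reports whether the path is present
+ // (illustrative, not the vendored implementation):
+ //
+ //   func pathExists(path string) bool {
+ //       _, err := os.Stat(path)
+ //       return err == nil
+ //   }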
+
+ config.BindEnv("procfs_path")
+ config.BindEnv("container_proc_root")
+ config.BindEnv("container_cgroup_root")
+ config.BindEnvAndSetDefault("ignore_host_etc", false)
+
+ config.BindEnvAndSetDefault("proc_root", "/proc")
+ config.BindEnvAndSetDefault("histogram_aggregates", []string{"max", "median", "avg", "count"})
+ config.BindEnvAndSetDefault("histogram_percentiles", []string{"0.95"})
+ config.BindEnvAndSetDefault("aggregator_stop_timeout", 2)
+ config.BindEnvAndSetDefault("aggregator_buffer_size", 100)
+ config.BindEnvAndSetDefault("aggregator_use_tags_store", true)
+ config.BindEnvAndSetDefault("basic_telemetry_add_container_tags", false) // configure adding the agent container tags to the basic agent telemetry metrics (e.g. `datadog.agent.running`)
+ config.BindEnvAndSetDefault("aggregator_flush_metrics_and_serialize_in_parallel_chan_size", 200)
+ config.BindEnvAndSetDefault("aggregator_flush_metrics_and_serialize_in_parallel_buffer_size", 4000)
+
+ // Serializer
+ config.BindEnvAndSetDefault("enable_stream_payload_serialization", true)
+ config.BindEnvAndSetDefault("enable_service_checks_stream_payload_serialization", true)
+ config.BindEnvAndSetDefault("enable_events_stream_payload_serialization", true)
+ config.BindEnvAndSetDefault("enable_sketch_stream_payload_serialization", true)
+ config.BindEnvAndSetDefault("enable_json_stream_shared_compressor_buffers", true)
+
+ // Warning: do not change the following values. Your payloads will get dropped by Datadog's intake.
+ config.BindEnvAndSetDefault("serializer_max_payload_size", 2*megaByte+megaByte/2)
+ config.BindEnvAndSetDefault("serializer_max_uncompressed_payload_size", 4*megaByte)
+ config.BindEnvAndSetDefault("serializer_max_series_points_per_payload", 10000)
+ config.BindEnvAndSetDefault("serializer_max_series_payload_size", 512000)
+ config.BindEnvAndSetDefault("serializer_max_series_uncompressed_payload_size", 5242880)
+ config.BindEnvAndSetDefault("serializer_compressor_kind", DefaultCompressorKind)
+
+ config.BindEnvAndSetDefault("use_v2_api.series", true)
+ // Serializer: allow the user to exclude any kind of payload from being sent
+ config.BindEnvAndSetDefault("enable_payloads.events", true)
+ config.BindEnvAndSetDefault("enable_payloads.series", true)
+ config.BindEnvAndSetDefault("enable_payloads.service_checks", true)
+ config.BindEnvAndSetDefault("enable_payloads.sketches", true)
+ config.BindEnvAndSetDefault("enable_payloads.json_to_v1_intake", true)
+
+ // Forwarder
+ config.BindEnvAndSetDefault("additional_endpoints", map[string][]string{})
+ config.BindEnvAndSetDefault("forwarder_timeout", 20)
+ config.BindEnv("forwarder_retry_queue_max_size") // Deprecated in favor of `forwarder_retry_queue_payloads_max_size`
+ config.BindEnv("forwarder_retry_queue_payloads_max_size") // Default value is defined inside `NewOptions` in pkg/forwarder/forwarder.go
+ config.BindEnvAndSetDefault("forwarder_connection_reset_interval", 0) // in seconds, 0 means disabled
+ config.BindEnvAndSetDefault("forwarder_apikey_validation_interval", DefaultAPIKeyValidationInterval) // in minutes
+ config.BindEnvAndSetDefault("forwarder_num_workers", 1)
+ config.BindEnvAndSetDefault("forwarder_stop_timeout", 2)
+ // Forwarder retry settings
+ config.BindEnvAndSetDefault("forwarder_backoff_factor", 2)
+ config.BindEnvAndSetDefault("forwarder_backoff_base", 2)
+ config.BindEnvAndSetDefault("forwarder_backoff_max", 64)
+ config.BindEnvAndSetDefault("forwarder_recovery_interval", DefaultForwarderRecoveryInterval)
+ config.BindEnvAndSetDefault("forwarder_recovery_reset", false)
+
+ // Forwarder storage on disk
+ config.BindEnvAndSetDefault("forwarder_storage_path", "")
+ config.BindEnvAndSetDefault("forwarder_outdated_file_in_days", 10)
+ config.BindEnvAndSetDefault("forwarder_flush_to_disk_mem_ratio", 0.5)
+ config.BindEnvAndSetDefault("forwarder_storage_max_size_in_bytes", 0) // 0 means disabled. This is a BETA feature.
+ config.BindEnvAndSetDefault("forwarder_storage_max_disk_ratio", 0.80) // Do not store transactions on disk when the disk usage exceeds 80% of the disk capacity. Use 80% as some applications do not behave well when the disk space is very small.
+ config.BindEnvAndSetDefault("forwarder_retry_queue_capacity_time_interval_sec", 900) // 15 mins
+
+ // Forwarder channels buffer size
+ config.BindEnvAndSetDefault("forwarder_high_prio_buffer_size", 100)
+ config.BindEnvAndSetDefault("forwarder_low_prio_buffer_size", 100)
+ config.BindEnvAndSetDefault("forwarder_requeue_buffer_size", 100)
+
+ // Dogstatsd
+ config.BindEnvAndSetDefault("use_dogstatsd", true)
+ config.BindEnvAndSetDefault("dogstatsd_port", 8125) // Notice: 0 means UDP port closed
+ config.BindEnvAndSetDefault("dogstatsd_pipe_name", "") // experimental and not officially supported for now.
+ // Experimental and not officially supported for now.
+ // Options are: udp, uds, named_pipe
+ config.BindEnvAndSetDefault("dogstatsd_eol_required", []string{})
+
+ // The following options configure how the dogstatsd intake buffers and queues incoming datagrams.
+ // When a datagram is received it is first added to a datagrams buffer. This buffer fills up until
+ // we reach `dogstatsd_packet_buffer_size` datagrams or after `dogstatsd_packet_buffer_flush_timeout` ms.
+ // After this happens we flush this buffer of datagrams to a queue for processing. The size of this queue
+ // is `dogstatsd_queue_size`.
+ config.BindEnvAndSetDefault("dogstatsd_buffer_size", 1024*8)
+ config.BindEnvAndSetDefault("dogstatsd_packet_buffer_size", 32)
+ config.BindEnvAndSetDefault("dogstatsd_packet_buffer_flush_timeout", 100*time.Millisecond)
+ config.BindEnvAndSetDefault("dogstatsd_queue_size", 1024)
+
+ config.BindEnvAndSetDefault("dogstatsd_non_local_traffic", false)
+ config.BindEnvAndSetDefault("dogstatsd_socket", "") // Notice: empty means feature disabled
+ config.BindEnvAndSetDefault("dogstatsd_stream_socket", "") // Experimental || Notice: empty means feature disabled
+ config.BindEnvAndSetDefault("dogstatsd_pipeline_autoadjust", false)
+ config.BindEnvAndSetDefault("dogstatsd_pipeline_autoadjust_strategy", "max_throughput")
+ config.BindEnvAndSetDefault("dogstatsd_pipeline_count", 1)
+ config.BindEnvAndSetDefault("dogstatsd_stats_port", 5000)
+ config.BindEnvAndSetDefault("dogstatsd_stats_enable", false)
+ config.BindEnvAndSetDefault("dogstatsd_stats_buffer", 10)
+ config.BindEnvAndSetDefault("dogstatsd_telemetry_enabled_listener_id", false)
+ // Controls how dogstatsd-stats logs are generated
+ config.BindEnvAndSetDefault("dogstatsd_log_file", "")
+ config.BindEnvAndSetDefault("dogstatsd_logging_enabled", true)
+ config.BindEnvAndSetDefault("dogstatsd_log_file_max_rolls", 3)
+ config.BindEnvAndSetDefault("dogstatsd_log_file_max_size", "10Mb")
+ // Controls how long a counter keeps being sampled as 0 after it stops being received
+ config.BindEnvAndSetDefault("dogstatsd_expiry_seconds", 300)
+ // Controls how long we keep dogstatsd contexts in memory.
+ config.BindEnvAndSetDefault("dogstatsd_context_expiry_seconds", 20)
+ config.BindEnvAndSetDefault("dogstatsd_origin_detection", false) // Only supported for socket traffic
+ config.BindEnvAndSetDefault("dogstatsd_origin_detection_client", false)
+ config.BindEnvAndSetDefault("dogstatsd_origin_optout_enabled", true)
+ config.BindEnvAndSetDefault("dogstatsd_so_rcvbuf", 0)
+ config.BindEnvAndSetDefault("dogstatsd_metrics_stats_enable", false)
+ config.BindEnvAndSetDefault("dogstatsd_tags", []string{})
+ config.BindEnvAndSetDefault("dogstatsd_mapper_cache_size", 1000)
+ config.BindEnvAndSetDefault("dogstatsd_string_interner_size", 4096)
+ // Enable check for Entity-ID presence when enriching Dogstatsd metrics with tags
+ config.BindEnvAndSetDefault("dogstatsd_entity_id_precedence", false)
+ // Sends Dogstatsd parse errors to the Debug level instead of the Error level
+ config.BindEnvAndSetDefault("dogstatsd_disable_verbose_logs", false)
+ // Location to store dogstatsd captures by default
+ config.BindEnvAndSetDefault("dogstatsd_capture_path", "")
+ // Depth of the channel the capture writer reads before persisting to disk.
+ // Default is 0 - blocking channel
+ config.BindEnvAndSetDefault("dogstatsd_capture_depth", 0)
+ // Enable the no-aggregation pipeline.
+ config.BindEnvAndSetDefault("dogstatsd_no_aggregation_pipeline", true)
+ // Maximum number of metrics per payload sent by the no-aggregation pipeline to the intake.
+ config.BindEnvAndSetDefault("dogstatsd_no_aggregation_pipeline_batch_size", 2048)
+ // Force the number of dogstatsd workers (mainly used for benchmarks or some very specific use cases)
+ config.BindEnvAndSetDefault("dogstatsd_workers_count", 0)
+
+ // To enable the following feature, GODEBUG must contain `madvdontneed=1`
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.enabled", false)
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.low_soft_limit", 0.7)
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.high_soft_limit", 0.8)
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.go_gc", 1) // 0 means don't call SetGCPercent
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.memory_ballast", int64(1024*1024*1024*8))
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.rate_check.min", 0.01)
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.rate_check.max", 1)
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.rate_check.factor", 2)
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.soft_limit_freeos_check.min", 0.01)
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.soft_limit_freeos_check.max", 0.1)
+ config.BindEnvAndSetDefault("dogstatsd_mem_based_rate_limiter.soft_limit_freeos_check.factor", 1.5)
+
+ config.BindEnv("dogstatsd_mapper_profiles")
+ config.SetEnvKeyTransformer("dogstatsd_mapper_profiles", func(in string) interface{} {
+ var mappings []MappingProfile
+ if err := json.Unmarshal([]byte(in), &mappings); err != nil {
+ log.Errorf(`"dogstatsd_mapper_profiles" can not be parsed: %v`, err)
+ }
+ return mappings
+ })
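+ // The transformer above expects the environment value to be a JSON array that
+ // unmarshals into []MappingProfile. A minimal sketch, assuming the usual DD_-prefixed
+ // environment variable naming and the documented mapper profile fields (both are
+ // assumptions here, since MappingProfile is defined elsewhere):
+ //
+ //   DD_DOGSTATSD_MAPPER_PROFILES='[{"name":"example","prefix":"example.",
+ //     "mappings":[{"match":"example.job.duration.*","name":"example.job.duration",
+ //     "tags":{"job_name":"$1"}}]}]'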
+
+ config.BindEnvAndSetDefault("statsd_forward_host", "")
+ config.BindEnvAndSetDefault("statsd_forward_port", 0)
+ config.BindEnvAndSetDefault("statsd_metric_namespace", "")
+ config.BindEnvAndSetDefault("statsd_metric_namespace_blacklist", StandardStatsdPrefixes)
+ config.BindEnvAndSetDefault("statsd_metric_blocklist", []string{})
+ config.BindEnvAndSetDefault("statsd_metric_blocklist_match_prefix", false)
+
+ // Autoconfig
+ // Default timeout in seconds when talking to the configuration storage (etcd, zookeeper, ...)
+ config.BindEnvAndSetDefault("autoconf_template_url_timeout", 5)
+ // Where to look for check templates if no custom path is defined
+ config.BindEnvAndSetDefault("autoconf_template_dir", "/datadog/check_configs")
+ config.BindEnvAndSetDefault("autoconf_config_files_poll", false)
+ config.BindEnvAndSetDefault("autoconf_config_files_poll_interval", 60)
+ config.BindEnvAndSetDefault("exclude_pause_container", true)
+ config.BindEnvAndSetDefault("ac_include", []string{})
+ config.BindEnvAndSetDefault("ac_exclude", []string{})
+ // ac_load_timeout is used to delay the introduction of sources other than
+ // the ones automatically loaded by the AC into the logs agent.
+ // It is mainly here to delay the introduction of container_collect_all
+ // in the logs agent, to avoid it tailing all the available containers.
+ config.BindEnvAndSetDefault("ac_load_timeout", 30000) // in milliseconds
+ config.BindEnvAndSetDefault("container_include", []string{})
+ config.BindEnvAndSetDefault("container_exclude", []string{})
+ config.BindEnvAndSetDefault("container_include_metrics", []string{})
+ config.BindEnvAndSetDefault("container_exclude_metrics", []string{})
+ config.BindEnvAndSetDefault("container_include_logs", []string{})
+ config.BindEnvAndSetDefault("container_exclude_logs", []string{})
+ config.BindEnvAndSetDefault("container_exclude_stopped_age", DefaultAuditorTTL-1) // in hours
+ config.BindEnvAndSetDefault("ad_config_poll_interval", int64(10)) // in seconds
+ config.BindEnvAndSetDefault("extra_listeners", []string{})
+ config.BindEnvAndSetDefault("extra_config_providers", []string{})
+ config.BindEnvAndSetDefault("ignore_autoconf", []string{})
+ config.BindEnvAndSetDefault("autoconfig_from_environment", true)
+ config.BindEnvAndSetDefault("autoconfig_exclude_features", []string{})
+ config.BindEnvAndSetDefault("autoconfig_include_features", []string{})
+
+ // Docker
+ config.BindEnvAndSetDefault("docker_query_timeout", int64(5))
+ config.BindEnvAndSetDefault("docker_labels_as_tags", map[string]string{})
+ config.BindEnvAndSetDefault("docker_env_as_tags", map[string]string{})
+ config.BindEnvAndSetDefault("kubernetes_pod_labels_as_tags", map[string]string{})
+ config.BindEnvAndSetDefault("kubernetes_pod_annotations_as_tags", map[string]string{})
+ config.BindEnvAndSetDefault("kubernetes_node_labels_as_tags", map[string]string{})
+ config.BindEnvAndSetDefault("kubernetes_node_annotations_as_tags", map[string]string{"cluster.k8s.io/machine": "kube_machine"})
+ config.BindEnvAndSetDefault("kubernetes_node_annotations_as_host_aliases", []string{"cluster.k8s.io/machine"})
+ config.BindEnvAndSetDefault("kubernetes_node_label_as_cluster_name", "")
+ config.BindEnvAndSetDefault("kubernetes_namespace_labels_as_tags", map[string]string{})
+ config.BindEnvAndSetDefault("container_cgroup_prefix", "")
+
+ // CRI
+ config.BindEnvAndSetDefault("cri_socket_path", "") // empty is disabled
+ config.BindEnvAndSetDefault("cri_connection_timeout", int64(1)) // in seconds
+ config.BindEnvAndSetDefault("cri_query_timeout", int64(5)) // in seconds
+
+ // Containerd
+ config.BindEnvAndSetDefault("containerd_namespace", []string{})
+ config.BindEnvAndSetDefault("containerd_namespaces", []string{}) // alias for containerd_namespace
+ config.BindEnvAndSetDefault("containerd_exclude_namespaces", []string{"moby"})
+ config.BindEnvAndSetDefault("container_env_as_tags", map[string]string{})
+ config.BindEnvAndSetDefault("container_labels_as_tags", map[string]string{})
+
+ // Podman
+ config.BindEnvAndSetDefault("podman_db_path", "")
+
+ // Kubernetes
+ config.BindEnvAndSetDefault("kubernetes_kubelet_host", "")
+ config.BindEnvAndSetDefault("kubernetes_kubelet_nodename", "")
+ config.BindEnvAndSetDefault("eks_fargate", false)
+ config.BindEnvAndSetDefault("kubernetes_http_kubelet_port", 10255)
+ config.BindEnvAndSetDefault("kubernetes_https_kubelet_port", 10250)
+
+ config.BindEnvAndSetDefault("kubelet_tls_verify", true)
+ config.BindEnvAndSetDefault("kubelet_core_check_enabled", true)
+ config.BindEnvAndSetDefault("collect_kubernetes_events", false)
+ config.BindEnvAndSetDefault("kubelet_client_ca", "")
+
+ config.BindEnvAndSetDefault("kubelet_auth_token_path", "")
+ config.BindEnvAndSetDefault("kubelet_client_crt", "")
+ config.BindEnvAndSetDefault("kubelet_client_key", "")
+
+ config.BindEnvAndSetDefault("kubernetes_pod_expiration_duration", 15*60) // in seconds, default 15 minutes
+ config.BindEnvAndSetDefault("kubelet_wait_on_missing_container", 0)
+ config.BindEnvAndSetDefault("kubelet_cache_pods_duration", 5) // Polling frequency in seconds of the agent to the kubelet "/pods" endpoint
+ config.BindEnvAndSetDefault("kubelet_listener_polling_interval", 5) // Polling frequency in seconds of the pod watcher to detect new pods/containers (affected by kubelet_cache_pods_duration setting)
+ config.BindEnvAndSetDefault("kubernetes_collect_metadata_tags", true)
+ config.BindEnvAndSetDefault("kubernetes_metadata_tag_update_freq", 60) // Polling frequency of the Agent to the DCA in seconds (gets the local cache if the DCA is disabled)
+ config.BindEnvAndSetDefault("kubernetes_apiserver_client_timeout", 10)
+ config.BindEnvAndSetDefault("kubernetes_apiserver_informer_client_timeout", 0)
+ config.BindEnvAndSetDefault("kubernetes_map_services_on_ip", false) // temporary opt-out of the new mapping logic
+ config.BindEnvAndSetDefault("kubernetes_apiserver_use_protobuf", false)
+ config.BindEnvAndSetDefault("kubernetes_ad_tags_disabled", []string{})
+
+ config.BindEnvAndSetDefault("prometheus_scrape.enabled", false) // Enables the prometheus config provider
+ config.BindEnvAndSetDefault("prometheus_scrape.service_endpoints", false) // Enables Service Endpoints checks in the prometheus config provider
+ config.BindEnv("prometheus_scrape.checks") // Defines any extra prometheus/openmetrics check configurations to be handled by the prometheus config provider
+ config.BindEnvAndSetDefault("prometheus_scrape.version", 1) // Version of the openmetrics check to be scheduled by the Prometheus auto-discovery
+
+ // Network Devices Monitoring
+ bindEnvAndSetLogsConfigKeys(config, "network_devices.metadata.")
+ config.BindEnvAndSetDefault("network_devices.namespace", "default")
+
+ config.SetKnown("snmp_listener.discovery_interval")
+ config.SetKnown("snmp_listener.allowed_failures")
+ config.SetKnown("snmp_listener.discovery_allowed_failures")
+ config.SetKnown("snmp_listener.collect_device_metadata")
+ config.SetKnown("snmp_listener.collect_topology")
+ config.SetKnown("snmp_listener.workers")
+ config.SetKnown("snmp_listener.configs")
+ config.SetKnown("snmp_listener.loader")
+ config.SetKnown("snmp_listener.min_collection_interval")
+ config.SetKnown("snmp_listener.namespace")
+ config.SetKnown("snmp_listener.use_device_id_as_hostname")
+ config.SetKnown("snmp_listener.ping")
+ config.SetKnown("snmp_listener.ping.enabled")
+ config.SetKnown("snmp_listener.ping.count")
+ config.SetKnown("snmp_listener.ping.interval")
+ config.SetKnown("snmp_listener.ping.timeout")
+ config.SetKnown("snmp_listener.ping.linux")
+ config.SetKnown("snmp_listener.ping.linux.use_raw_socket")
+
+ // network_devices.autodiscovery has precedence over snmp_listener config
+ // snmp_listener config is still here for legacy reasons
+ config.SetKnown("network_devices.autodiscovery.discovery_interval")
+ config.SetKnown("network_devices.autodiscovery.allowed_failures")
+ config.SetKnown("network_devices.autodiscovery.discovery_allowed_failures")
+ config.SetKnown("network_devices.autodiscovery.collect_device_metadata")
+ config.SetKnown("network_devices.autodiscovery.collect_topology")
+ config.SetKnown("network_devices.autodiscovery.workers")
+ config.SetKnown("network_devices.autodiscovery.configs")
+ config.SetKnown("network_devices.autodiscovery.loader")
+ config.SetKnown("network_devices.autodiscovery.min_collection_interval")
+ config.SetKnown("network_devices.autodiscovery.namespace")
+ config.SetKnown("network_devices.autodiscovery.use_device_id_as_hostname")
+ config.SetKnown("network_devices.autodiscovery.ping")
+ config.SetKnown("network_devices.autodiscovery.ping.enabled")
+ config.SetKnown("network_devices.autodiscovery.ping.count")
+ config.SetKnown("network_devices.autodiscovery.ping.interval")
+ config.SetKnown("network_devices.autodiscovery.ping.timeout")
+ config.SetKnown("network_devices.autodiscovery.ping.linux")
+ config.SetKnown("network_devices.autodiscovery.ping.linux.use_raw_socket")
+
+ bindEnvAndSetLogsConfigKeys(config, "network_devices.snmp_traps.forwarder.")
+ config.BindEnvAndSetDefault("network_devices.snmp_traps.enabled", false)
+ config.BindEnvAndSetDefault("network_devices.snmp_traps.port", 9162)
+ config.BindEnvAndSetDefault("network_devices.snmp_traps.community_strings", []string{})
+ config.BindEnvAndSetDefault("network_devices.snmp_traps.bind_host", "0.0.0.0")
+ config.BindEnvAndSetDefault("network_devices.snmp_traps.stop_timeout", 5) // in seconds
+ config.SetKnown("network_devices.snmp_traps.users")
+
+ // NetFlow
+ config.SetKnown("network_devices.netflow.listeners")
+ config.SetKnown("network_devices.netflow.stop_timeout")
+ config.SetKnown("network_devices.netflow.aggregator_buffer_size")
+ config.SetKnown("network_devices.netflow.aggregator_flush_interval")
+ config.SetKnown("network_devices.netflow.aggregator_flow_context_ttl")
+ config.SetKnown("network_devices.netflow.aggregator_port_rollup_threshold")
+ config.SetKnown("network_devices.netflow.aggregator_rollup_tracker_refresh_interval")
+ config.BindEnvAndSetDefault("network_devices.netflow.enabled", "false")
+ bindEnvAndSetLogsConfigKeys(config, "network_devices.netflow.forwarder.")
+
+ // Network Path
+ bindEnvAndSetLogsConfigKeys(config, "network_path.forwarder.")
+
+ // Kube ApiServer
+ config.BindEnvAndSetDefault("kubernetes_kubeconfig_path", "")
+ config.BindEnvAndSetDefault("kubernetes_apiserver_ca_path", "")
+ config.BindEnvAndSetDefault("kubernetes_apiserver_tls_verify", true)
+ config.BindEnvAndSetDefault("leader_lease_duration", "60")
+ config.BindEnvAndSetDefault("leader_election", false)
+ config.BindEnvAndSetDefault("leader_lease_name", "datadog-leader-election")
+ config.BindEnvAndSetDefault("leader_election_default_resource", "configmap")
+ config.BindEnvAndSetDefault("leader_election_release_on_shutdown", true)
+ config.BindEnvAndSetDefault("kube_resources_namespace", "")
+ config.BindEnvAndSetDefault("kube_cache_sync_timeout_seconds", 5)
+
+ // Datadog cluster agent
+ config.BindEnvAndSetDefault("cluster_agent.enabled", false)
+ config.BindEnvAndSetDefault("cluster_agent.cmd_port", 5005)
+ config.BindEnvAndSetDefault("cluster_agent.allow_legacy_tls", false)
+ config.BindEnvAndSetDefault("cluster_agent.auth_token", "")
+ config.BindEnvAndSetDefault("cluster_agent.url", "")
+ config.BindEnvAndSetDefault("cluster_agent.kubernetes_service_name", "datadog-cluster-agent")
+ config.BindEnvAndSetDefault("cluster_agent.service_account_name", "")
+ config.BindEnvAndSetDefault("cluster_agent.tagging_fallback", false)
+ config.BindEnvAndSetDefault("cluster_agent.server.read_timeout_seconds", 2)
+ config.BindEnvAndSetDefault("cluster_agent.server.write_timeout_seconds", 2)
+ config.BindEnvAndSetDefault("cluster_agent.server.idle_timeout_seconds", 60)
+ config.BindEnvAndSetDefault("cluster_agent.refresh_on_cache_miss", true)
+ config.BindEnvAndSetDefault("cluster_agent.serve_nozzle_data", false)
+ config.BindEnvAndSetDefault("cluster_agent.sidecars_tags", false)
+ config.BindEnvAndSetDefault("cluster_agent.isolation_segments_tags", false)
+ config.BindEnvAndSetDefault("cluster_agent.token_name", "datadogtoken")
+ config.BindEnvAndSetDefault("cluster_agent.max_leader_connections", 100)
+ config.BindEnvAndSetDefault("cluster_agent.client_reconnect_period_seconds", 1200)
+ config.BindEnvAndSetDefault("cluster_agent.collect_kubernetes_tags", false)
+ config.BindEnvAndSetDefault("cluster_agent.kubernetes_resources_collection.pod_annotations_exclude", []string{
+ `^kubectl\.kubernetes\.io\/last-applied-configuration$`,
+ `^ad\.datadoghq\.com\/([[:alnum:]]+\.)?(checks|check_names|init_configs|instances)$`,
+ })
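+ // For illustration, the second pattern above matches annotations such as
+ // "ad.datadoghq.com/check_names" or "ad.datadoghq.com/nginx.instances", while the
+ // first one matches only "kubectl.kubernetes.io/last-applied-configuration".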
+ config.BindEnvAndSetDefault("metrics_port", "5000")
+ config.BindEnvAndSetDefault("cluster_agent.language_detection.patcher.enabled", true)
+ config.BindEnvAndSetDefault("cluster_agent.language_detection.patcher.base_backoff", "5m")
+ config.BindEnvAndSetDefault("cluster_agent.language_detection.patcher.max_backoff", "1h")
+ // sets the expiration deadline (TTL) for reported languages
+ config.BindEnvAndSetDefault("cluster_agent.language_detection.cleanup.language_ttl", "30m")
+ // language annotation cleanup period
+ config.BindEnvAndSetDefault("cluster_agent.language_detection.cleanup.period", "10m")
+
+ // Metadata endpoints
+
+ // Defines the maximum size of the hostname gathered from EC2, GCE, Azure, Alibaba, Oracle and Tencent cloud metadata
+ // endpoints (all cloud providers except IBM). IBM Cloud ignores this setting as its API returns a huge JSON with
+ // all the metadata for the VM.
+ // Used internally to protect against configurations where metadata endpoints return incorrect values with 200 status codes.
+ config.BindEnvAndSetDefault("metadata_endpoints_max_hostname_size", 255)
+
+ // Duration during which the host tags will be submitted with metrics.
+ config.BindEnvAndSetDefault("expected_tags_duration", time.Duration(0))
+
+ // EC2
+ config.BindEnvAndSetDefault("ec2_use_windows_prefix_detection", false)
+ config.BindEnvAndSetDefault("ec2_metadata_timeout", 300) // value in milliseconds
+ config.BindEnvAndSetDefault("ec2_metadata_token_lifetime", 21600) // value in seconds
+ config.BindEnvAndSetDefault("ec2_prefer_imdsv2", false)
+ config.BindEnvAndSetDefault("ec2_prioritize_instance_id_as_hostname", false) // used to bypass the hostname detection logic and force the EC2 instance ID as a hostname.
+ config.BindEnvAndSetDefault("ec2_use_dmi", true) // should the agent leverage DMI information to know if it's running on EC2 or not. Enabling this will add the instance ID from DMI to the host alias list.
+ config.BindEnvAndSetDefault("collect_ec2_tags", false)
+ config.BindEnvAndSetDefault("collect_ec2_tags_use_imds", false)
+ config.BindEnvAndSetDefault("exclude_ec2_tags", []string{})
+
+ // ECS
+ config.BindEnvAndSetDefault("ecs_agent_url", "") // Will be autodetected
+ config.BindEnvAndSetDefault("ecs_agent_container_name", "ecs-agent")
+ config.BindEnvAndSetDefault("ecs_collect_resource_tags_ec2", false)
+ config.BindEnvAndSetDefault("ecs_resource_tags_replace_colon", false)
+ config.BindEnvAndSetDefault("ecs_metadata_timeout", 500) // value in milliseconds
+ config.BindEnvAndSetDefault("ecs_task_collection_enabled", false)
+ config.BindEnvAndSetDefault("ecs_task_cache_ttl", 3*time.Minute)
+ config.BindEnvAndSetDefault("ecs_task_collection_rate", 35)
+ config.BindEnvAndSetDefault("ecs_task_collection_burst", 60)
+
+ // GCE
+ config.BindEnvAndSetDefault("collect_gce_tags", true)
+ config.BindEnvAndSetDefault("exclude_gce_tags", []string{
+ "kube-env", "kubelet-config", "containerd-configure-sh", "startup-script", "shutdown-script",
+ "configure-sh", "sshKeys", "ssh-keys", "user-data", "cli-cert", "ipsec-cert", "ssl-cert", "google-container-manifest",
+ "bosh_settings", "windows-startup-script-ps1", "common-psm1", "k8s-node-setup-psm1", "serial-port-logging-enable",
+ "enable-oslogin", "disable-address-manager", "disable-legacy-endpoints", "windows-keys", "kubeconfig", "gce-container-declaration",
+ })
+ config.BindEnvAndSetDefault("gce_send_project_id_tag", false)
+ config.BindEnvAndSetDefault("gce_metadata_timeout", 1000) // value in milliseconds
+
+ // Cloud Foundry
+ config.BindEnvAndSetDefault("cloud_foundry", false)
+ config.BindEnvAndSetDefault("bosh_id", "")
+ config.BindEnvAndSetDefault("cf_os_hostname_aliasing", false)
+ config.BindEnvAndSetDefault("cloud_foundry_buildpack", false)
+
+ // Cloud Foundry BBS
+ config.BindEnvAndSetDefault("cloud_foundry_bbs.url", "https://bbs.service.cf.internal:8889")
+ config.BindEnvAndSetDefault("cloud_foundry_bbs.poll_interval", 15)
+ config.BindEnvAndSetDefault("cloud_foundry_bbs.ca_file", "")
+ config.BindEnvAndSetDefault("cloud_foundry_bbs.cert_file", "")
+ config.BindEnvAndSetDefault("cloud_foundry_bbs.key_file", "")
+ config.BindEnvAndSetDefault("cloud_foundry_bbs.env_include", []string{})
+ config.BindEnvAndSetDefault("cloud_foundry_bbs.env_exclude", []string{})
+
+ // Cloud Foundry CC
+ config.BindEnvAndSetDefault("cloud_foundry_cc.url", "https://cloud-controller-ng.service.cf.internal:9024")
+ config.BindEnvAndSetDefault("cloud_foundry_cc.client_id", "")
+ config.BindEnvAndSetDefault("cloud_foundry_cc.client_secret", "")
+ config.BindEnvAndSetDefault("cloud_foundry_cc.poll_interval", 60)
+ config.BindEnvAndSetDefault("cloud_foundry_cc.skip_ssl_validation", false)
+ config.BindEnvAndSetDefault("cloud_foundry_cc.apps_batch_size", 5000)
+
+ // Cloud Foundry Garden
+ config.BindEnvAndSetDefault("cloud_foundry_garden.listen_network", "unix")
+ config.BindEnvAndSetDefault("cloud_foundry_garden.listen_address", "/var/vcap/data/garden/garden.sock")
+
+ // Cloud Foundry Container Tagger
+ config.BindEnvAndSetDefault("cloud_foundry_container_tagger.shell_path", "/bin/sh")
+ config.BindEnvAndSetDefault("cloud_foundry_container_tagger.retry_count", 10)
+ config.BindEnvAndSetDefault("cloud_foundry_container_tagger.retry_interval", 10)
+
+ // Azure
+ config.BindEnvAndSetDefault("azure_hostname_style", "os")
+
+ // IBM cloud
+ // We use a long timeout here since the metadata and token API can be very slow sometimes.
+ config.BindEnvAndSetDefault("ibm_metadata_timeout", 5) // value in seconds
+
+ // JMXFetch
+ config.BindEnvAndSetDefault("jmx_custom_jars", []string{})
+ config.BindEnvAndSetDefault("jmx_use_cgroup_memory_limit", false)
+ config.BindEnvAndSetDefault("jmx_use_container_support", false)
+ config.BindEnvAndSetDefault("jmx_max_ram_percentage", float64(25.0))
+ config.BindEnvAndSetDefault("jmx_max_restarts", int64(3))
+ config.BindEnvAndSetDefault("jmx_restart_interval", int64(5))
+ config.BindEnvAndSetDefault("jmx_thread_pool_size", 3)
+ config.BindEnvAndSetDefault("jmx_reconnection_thread_pool_size", 3)
+ config.BindEnvAndSetDefault("jmx_collection_timeout", 60)
+ config.BindEnvAndSetDefault("jmx_check_period", int(defaults.DefaultCheckInterval/time.Millisecond))
+ config.BindEnvAndSetDefault("jmx_reconnection_timeout", 60)
+ config.BindEnvAndSetDefault("jmx_statsd_telemetry_enabled", false)
+ config.BindEnvAndSetDefault("jmx_telemetry_enabled", false)
+ // The following jmx_statsd_client-* options are internal and will not be documented
+ // the queue size is the number of elements (metrics, events, service checks) it can hold.
+ config.BindEnvAndSetDefault("jmx_statsd_client_queue_size", 4096)
+ config.BindEnvAndSetDefault("jmx_statsd_client_use_non_blocking", false)
+ // the "buffer" here is the socket send buffer (SO_SNDBUF) and the size is in bytes
+ config.BindEnvAndSetDefault("jmx_statsd_client_buffer_size", 0)
+ // the socket timeout (SO_SNDTIMEO) is in milliseconds
+ config.BindEnvAndSetDefault("jmx_statsd_client_socket_timeout", 0)
+
+ // Go_expvar server port
+ config.BindEnvAndSetDefault("expvar_port", "5000")
+
+ // internal profiling
+ config.BindEnvAndSetDefault("internal_profiling.enabled", false)
+ config.BindEnv("internal_profiling.profile_dd_url")
+ config.BindEnvAndSetDefault("internal_profiling.unix_socket", "") // file system path to a unix socket, e.g. `/var/run/datadog/apm.socket`
+ config.BindEnvAndSetDefault("internal_profiling.period", 5*time.Minute)
+ config.BindEnvAndSetDefault("internal_profiling.cpu_duration", 1*time.Minute)
+ config.BindEnvAndSetDefault("internal_profiling.block_profile_rate", 0)
+ config.BindEnvAndSetDefault("internal_profiling.mutex_profile_fraction", 0)
+ config.BindEnvAndSetDefault("internal_profiling.enable_goroutine_stacktraces", false)
+ config.BindEnvAndSetDefault("internal_profiling.enable_block_profiling", false)
+ config.BindEnvAndSetDefault("internal_profiling.enable_mutex_profiling", false)
+ config.BindEnvAndSetDefault("internal_profiling.delta_profiles", true)
+ config.BindEnvAndSetDefault("internal_profiling.extra_tags", []string{})
+ config.BindEnvAndSetDefault("internal_profiling.custom_attributes", []string{"check_id"})
+
+ config.BindEnvAndSetDefault("internal_profiling.capture_all_allocations", false)
+
+ // Logs Agent
+
+ // External Use: modify those parameters to configure the logs-agent.
+ // enable the logs-agent:
+ config.BindEnvAndSetDefault("logs_enabled", false)
+ config.BindEnvAndSetDefault("log_enabled", false) // deprecated, use logs_enabled instead
+ // collect all logs from all containers:
+ config.BindEnvAndSetDefault("logs_config.container_collect_all", false)
+ // add a socks5 proxy:
+ config.BindEnvAndSetDefault("logs_config.socks5_proxy_address", "")
+ // specific logs-agent api-key
+ config.BindEnv("logs_config.api_key")
+
+ // Duration during which the host tags will be submitted with log events.
+ config.BindEnvAndSetDefault("logs_config.expected_tags_duration", time.Duration(0)) // duration-formatted string (parsed by `time.ParseDuration`)
+ // send the logs to port 443 of the logs-backend via TCP:
+ config.BindEnvAndSetDefault("logs_config.use_port_443", false)
+ // increase the read buffer size of the UDP sockets:
+ config.BindEnvAndSetDefault("logs_config.frame_size", 9000)
+ // maximum log message size in bytes
+ config.BindEnvAndSetDefault("logs_config.max_message_size_bytes", DefaultMaxMessageSizeBytes)
+
+ // increase the number of files that can be tailed in parallel:
+ if runtime.GOOS == "darwin" {
+ // The default limit on darwin is 256.
+ // This is configurable per process on darwin with `ulimit -n` or a launchDaemon config.
+ config.BindEnvAndSetDefault("logs_config.open_files_limit", 200)
+ } else {
+ // There is no effective limit on Windows due to the use of the CreateFile win32 API.
+ // The OS default for most Linux distributions is 1024.
+ config.BindEnvAndSetDefault("logs_config.open_files_limit", 500)
+ }
+ // add global processing rules that are applied to all logs
+ config.BindEnv("logs_config.processing_rules")
+ // force the agent to use files to collect container logs in a kubernetes environment
+ config.BindEnvAndSetDefault("logs_config.k8s_container_use_file", false)
+ // Enable the agent to use files to collect container logs in a standalone docker environment; containers
+ // with an existing registry offset will continue to be tailed from the docker socket unless
+ // logs_config.docker_container_force_use_file is set to true.
+ config.BindEnvAndSetDefault("logs_config.docker_container_use_file", true)
+ // Force tailing from file for all docker containers, even the ones with an existing registry entry
+ config.BindEnvAndSetDefault("logs_config.docker_container_force_use_file", false)
+ // While parsing Kubernetes pod logs, use /var/log/containers to validate that
+ // the pod container ID matches.
+ config.BindEnvAndSetDefault("logs_config.validate_pod_container_id", true)
+ // additional config to ensure initial logs are tagged with kubelet tags
+ // wait (in seconds) for the tagger before starting to fetch tags of new AD services
+ config.BindEnvAndSetDefault("logs_config.tagger_warmup_duration", 0) // Disabled by default (0 seconds)
+ // Configurable docker client timeout while communicating with the docker daemon.
+ // It could happen that the docker daemon takes a lot of time gathering timestamps
+ // before starting to send any data when it has stored several large log files.
+ // This field lets you increase the read timeout to prevent the client from
+ // timing out too early in such a situation. Value in seconds.
+ config.BindEnvAndSetDefault("logs_config.docker_client_read_timeout", 30)
+ // Internal Use Only: avoid modifying these configuration parameters; doing so could lead to unexpected results.
+ config.BindEnvAndSetDefault("logs_config.run_path", defaultRunPath)
+ // DEPRECATED in favor of `logs_config.force_use_http`.
+ config.BindEnvAndSetDefault("logs_config.use_http", false)
+ config.BindEnvAndSetDefault("logs_config.force_use_http", false)
+ // DEPRECATED in favor of `logs_config.force_use_tcp`.
+ config.BindEnvAndSetDefault("logs_config.use_tcp", false)
+ config.BindEnvAndSetDefault("logs_config.force_use_tcp", false)
+
+ bindEnvAndSetLogsConfigKeys(config, "logs_config.")
+ bindEnvAndSetLogsConfigKeys(config, "database_monitoring.samples.")
+ bindEnvAndSetLogsConfigKeys(config, "database_monitoring.activity.")
+ bindEnvAndSetLogsConfigKeys(config, "database_monitoring.metrics.")
+ config.BindEnvAndSetDefault("database_monitoring.autodiscovery.aurora.enabled", false)
+ config.BindEnvAndSetDefault("database_monitoring.autodiscovery.aurora.discovery_interval", 300)
+ config.BindEnvAndSetDefault("database_monitoring.autodiscovery.aurora.region", "")
+ config.BindEnvAndSetDefault("database_monitoring.autodiscovery.aurora.query_timeout", 10)
+ config.BindEnvAndSetDefault("database_monitoring.autodiscovery.aurora.tags", []string{"datadoghq.com/scrape:true"})
+
+ config.BindEnvAndSetDefault("logs_config.dd_port", 10516)
+ config.BindEnvAndSetDefault("logs_config.dev_mode_use_proto", true)
+ config.BindEnvAndSetDefault("logs_config.dd_url_443", "agent-443-intake.logs.datadoghq.com")
+ config.BindEnvAndSetDefault("logs_config.stop_grace_period", 30)
+ // maximum time that the unix tailer will hold a log file open after it has been rotated
+ config.BindEnvAndSetDefault("logs_config.close_timeout", 60)
+ // maximum time that the windows tailer will hold a log file open, while waiting for
+ // the downstream logs pipeline to be ready to accept more data
+ config.BindEnvAndSetDefault("logs_config.windows_open_file_timeout", 5)
+ config.BindEnvAndSetDefault("logs_config.auto_multi_line_detection", false)
+ config.BindEnvAndSetDefault("logs_config.auto_multi_line_extra_patterns", []string{})
+ // The following auto_multi_line settings are experimental and may change
+ config.BindEnvAndSetDefault("logs_config.auto_multi_line_default_sample_size", 500)
+ config.BindEnvAndSetDefault("logs_config.auto_multi_line_default_match_timeout", 30) // Seconds
+ config.BindEnvAndSetDefault("logs_config.auto_multi_line_default_match_threshold", 0.48)
+
+ // If true, the agent looks for container logs in the location used by podman, rather
+ // than docker. This is a temporary configuration parameter to support podman logs until
+ // a more substantial refactor of autodiscovery is made to determine this automatically.
+ config.BindEnvAndSetDefault("logs_config.use_podman_logs", false)
+
+ // If set, the agent will look in this path for docker container log files. Use this option if
+ // docker's `data-root` has been set to a custom path and you wish to ingest docker logs from files. In
+ // order to check your docker data-root directory, run the command `docker info -f '{{.DockerRootDir}}'`
+ // See more documentation here:
+ // https://docs.docker.com/engine/reference/commandline/dockerd/.
+ config.BindEnvAndSetDefault("logs_config.docker_path_override", "")
+
+ config.BindEnvAndSetDefault("logs_config.auditor_ttl", DefaultAuditorTTL) // in hours
+ // Timeout in milliseconds used when performing aggregation operations,
+ // including multi-line log processing rules and chunked line reaggregation.
+ // It may be useful to increase it when log writing is slowed down, which
+ // could happen while serializing large objects on log lines.
+ config.BindEnvAndSetDefault("logs_config.aggregation_timeout", 1000)
+ // Time in seconds
+ config.BindEnvAndSetDefault("logs_config.file_scan_period", 10.0)
+
+ // Controls how wildcard file log sources are prioritized when there are more files
+ // matching wildcard log configurations than `logs_config.open_files_limit`
+ //
+ // Choices are 'by_name' and 'by_modification_time'. See config_template.yaml for full details.
+ //
+ // WARNING: 'by_modification_time' is less performant than 'by_name' and will trigger
+ // more disk I/O at the wildcard log paths
+ config.BindEnvAndSetDefault("logs_config.file_wildcard_selection_mode", "by_name")
+
+ // The cardinality of tags to send for checks and dogstatsd respectively.
+ // Choices are: low, orchestrator, high.
+ // WARNING: sending orchestrator or high cardinality tags for dogstatsd metrics may create more metrics
+ // (one per container instead of one per host).
+ // Changing this setting may impact your custom metrics billing.
+ config.BindEnvAndSetDefault("checks_tag_cardinality", "low")
+ config.BindEnvAndSetDefault("dogstatsd_tag_cardinality", "low")
+
+ config.BindEnvAndSetDefault("histogram_copy_to_distribution", false)
+ config.BindEnvAndSetDefault("histogram_copy_to_distribution_prefix", "")
+
+ config.BindEnv("api_key")
+
+ config.BindEnvAndSetDefault("hpa_watcher_polling_freq", 10)
+ config.BindEnvAndSetDefault("hpa_watcher_gc_period", 60*5) // 5 minutes
+ config.BindEnvAndSetDefault("hpa_configmap_name", "datadog-custom-metrics")
+ config.BindEnvAndSetDefault("external_metrics_provider.enabled", false)
+ config.BindEnvAndSetDefault("external_metrics_provider.port", 8443)
+ config.BindEnvAndSetDefault("external_metrics_provider.endpoint", "") // Override the Datadog API endpoint to query external metrics from
+ config.BindEnvAndSetDefault("external_metrics_provider.api_key", "") // Override the Datadog API Key for external metrics endpoint
+ config.BindEnvAndSetDefault("external_metrics_provider.app_key", "") // Override the Datadog APP Key for external metrics endpoint
+ config.SetKnown("external_metrics_provider.endpoints") // List of redundant endpoints to query external metrics from
+ config.BindEnvAndSetDefault("external_metrics_provider.refresh_period", 30) // value in seconds. Frequency of calls to Datadog to refresh metric values
+ config.BindEnvAndSetDefault("external_metrics_provider.batch_window", 10) // value in seconds. Batch the events from the Autoscalers informer to push updates to the ConfigMap (GlobalStore)
+ config.BindEnvAndSetDefault("external_metrics_provider.max_age", 120) // value in seconds. 4 cycles from the Autoscaler controller (up to Kubernetes 1.11) is enough to consider a metric stale
+ config.BindEnvAndSetDefault("external_metrics.aggregator", "avg") // aggregator used for the external metrics. Choose from [avg,sum,max,min]
+ config.BindEnvAndSetDefault("external_metrics_provider.max_time_window", 60*60*24) // Maximum window to query to get the metric from Datadog.
+ config.BindEnvAndSetDefault("external_metrics_provider.bucket_size", 60*5) // Window to query to get the metric from Datadog.
+ config.BindEnvAndSetDefault("external_metrics_provider.rollup", 30) // Bucket size to circumvent time aggregation side effects.
+ config.BindEnvAndSetDefault("external_metrics_provider.wpa_controller", false) // Activates the controller for Watermark Pod Autoscalers.
+ config.BindEnvAndSetDefault("external_metrics_provider.use_datadogmetric_crd", false) // Use DatadogMetric CRD with custom Datadog Queries instead of ConfigMap
+ config.BindEnvAndSetDefault("external_metrics_provider.enable_datadogmetric_autogen", true) // Enables autogeneration of DatadogMetrics when the DatadogMetric CRD is in use
+ config.BindEnvAndSetDefault("kubernetes_event_collection_timeout", 100) // timeout between two successful event collections in milliseconds.
+ config.BindEnvAndSetDefault("kubernetes_informers_resync_period", 60*5) // value in seconds. Default to 5 minutes
+ config.BindEnvAndSetDefault("external_metrics_provider.config", map[string]string{}) // list of options that can be used to configure the external metrics server
+ config.BindEnvAndSetDefault("external_metrics_provider.local_copy_refresh_rate", 30) // value in seconds
+ config.BindEnvAndSetDefault("external_metrics_provider.chunk_size", 35) // Maximum number of queries to batch when querying Datadog.
+ config.BindEnvAndSetDefault("external_metrics_provider.split_batches_with_backoff", false) // Splits batches and runs queries with errors individually with an exponential backoff
+ pkgconfigmodel.AddOverrideFunc(sanitizeExternalMetricsProviderChunkSize)
+ // Cluster check Autodiscovery
+ config.BindEnvAndSetDefault("cluster_checks.support_hybrid_ignore_ad_tags", false) // TODO(CINT)(Agent 7.53+) Remove this flag when hybrid ignore_ad_tags is fully deprecated
+ config.BindEnvAndSetDefault("cluster_checks.enabled", false)
+ config.BindEnvAndSetDefault("cluster_checks.node_expiration_timeout", 30) // value in seconds
+ config.BindEnvAndSetDefault("cluster_checks.warmup_duration", 30) // value in seconds
+ config.BindEnvAndSetDefault("cluster_checks.cluster_tag_name", "cluster_name")
+ config.BindEnvAndSetDefault("cluster_checks.extra_tags", []string{})
+ config.BindEnvAndSetDefault("cluster_checks.advanced_dispatching_enabled", false)
+ config.BindEnvAndSetDefault("cluster_checks.rebalance_with_utilization", false) // Experimental. Subject to change. Uses the runners utilization to balance.
+ config.BindEnvAndSetDefault("cluster_checks.rebalance_min_percentage_improvement", 10) // Experimental. Subject to change. Rebalance only if the distribution found improves the current one by this.
+ config.BindEnvAndSetDefault("cluster_checks.clc_runners_port", 5005)
+ config.BindEnvAndSetDefault("cluster_checks.exclude_checks", []string{})
+ config.BindEnvAndSetDefault("cluster_checks.exclude_checks_from_dispatching", []string{})
+ config.BindEnvAndSetDefault("cluster_checks.rebalance_period", 10*time.Minute)
+
+ // Cluster check runner
+ config.BindEnvAndSetDefault("clc_runner_enabled", false)
+ config.BindEnvAndSetDefault("clc_runner_id", "")
+ config.BindEnvAndSetDefault("clc_runner_host", "") // must be set using the Kubernetes downward API
+ config.BindEnvAndSetDefault("clc_runner_port", 5005)
+ config.BindEnvAndSetDefault("clc_runner_server_write_timeout", 15)
+ config.BindEnvAndSetDefault("clc_runner_server_readheader_timeout", 10)
+ config.BindEnvAndSetDefault("clc_runner_remote_tagger_enabled", false)
+
+ // Admission controller
+ config.BindEnvAndSetDefault("admission_controller.enabled", false)
+ config.BindEnvAndSetDefault("admission_controller.mutate_unlabelled", false)
+ config.BindEnvAndSetDefault("admission_controller.port", 8000)
+ config.BindEnvAndSetDefault("admission_controller.container_registry", "gcr.io/datadoghq")
+ config.BindEnvAndSetDefault("admission_controller.timeout_seconds", 10) // in seconds (see kubernetes/kubernetes#71508)
+ config.BindEnvAndSetDefault("admission_controller.service_name", "datadog-admission-controller")
+ config.BindEnvAndSetDefault("admission_controller.certificate.validity_bound", 365*24) // validity bound of the certificate created by the controller (in hours, default 1 year)
+ config.BindEnvAndSetDefault("admission_controller.certificate.expiration_threshold", 30*24) // how long before its expiration a certificate should be refreshed (in hours, default 1 month)
+ config.BindEnvAndSetDefault("admission_controller.certificate.secret_name", "webhook-certificate") // name of the Secret object containing the webhook certificate
+ config.BindEnvAndSetDefault("admission_controller.webhook_name", "datadog-webhook")
+ config.BindEnvAndSetDefault("admission_controller.inject_config.enabled", true)
+ config.BindEnvAndSetDefault("admission_controller.inject_config.endpoint", "/injectconfig")
+ config.BindEnvAndSetDefault("admission_controller.inject_config.inject_container_name", false)
+ config.BindEnvAndSetDefault("admission_controller.inject_config.mode", "hostip") // possible values: hostip / service / socket
+ config.BindEnvAndSetDefault("admission_controller.inject_config.local_service_name", "datadog")
+ config.BindEnvAndSetDefault("admission_controller.inject_config.socket_path", "/var/run/datadog")
+ config.BindEnvAndSetDefault("admission_controller.inject_config.trace_agent_socket", "unix:///var/run/datadog/apm.socket")
+ config.BindEnvAndSetDefault("admission_controller.inject_config.dogstatsd_socket", "unix:///var/run/datadog/dsd.socket")
+ config.BindEnvAndSetDefault("admission_controller.inject_tags.enabled", true)
+ config.BindEnvAndSetDefault("admission_controller.inject_tags.endpoint", "/injecttags")
+ config.BindEnvAndSetDefault("admission_controller.inject_tags.pod_owners_cache_validity", 10) // in minutes
+ config.BindEnv("admission_controller.pod_owners_cache_validity") // Alias for admission_controller.inject_tags.pod_owners_cache_validity. Was added without the "inject_tags" prefix by mistake but needs to be kept for backwards compatibility
+ config.BindEnvAndSetDefault("admission_controller.namespace_selector_fallback", false)
+ config.BindEnvAndSetDefault("admission_controller.failure_policy", "Ignore")
+ config.BindEnvAndSetDefault("admission_controller.reinvocation_policy", "IfNeeded")
+ config.BindEnvAndSetDefault("admission_controller.add_aks_selectors", false) // adds in the webhook some selectors that are required in AKS
+ config.BindEnvAndSetDefault("admission_controller.auto_instrumentation.enabled", true)
+ config.BindEnvAndSetDefault("admission_controller.auto_instrumentation.endpoint", "/injectlib")
+ config.BindEnv("admission_controller.auto_instrumentation.container_registry")
+ config.BindEnvAndSetDefault("admission_controller.auto_instrumentation.patcher.enabled", false)
+ config.BindEnvAndSetDefault("admission_controller.auto_instrumentation.patcher.fallback_to_file_provider", false) // to be enabled only in e2e tests
+ config.BindEnvAndSetDefault("admission_controller.auto_instrumentation.patcher.file_provider_path", "/etc/datadog-agent/patch/auto-instru.json") // to be used only in e2e tests
+ config.BindEnvAndSetDefault("admission_controller.auto_instrumentation.inject_auto_detected_libraries", false) // allows injecting libraries for languages detected by automatic language detection feature
+ config.BindEnv("admission_controller.auto_instrumentation.init_resources.cpu")
+ config.BindEnv("admission_controller.auto_instrumentation.init_resources.memory")
+ config.BindEnv("admission_controller.auto_instrumentation.asm.enabled", "DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_APPSEC_ENABLED") // config for ASM which is implemented in the client libraries
+ config.BindEnv("admission_controller.auto_instrumentation.iast.enabled", "DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_IAST_ENABLED") // config for IAST which is implemented in the client libraries
+ config.BindEnv("admission_controller.auto_instrumentation.asm_sca.enabled", "DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_APPSEC_SCA_ENABLED") // config for SCA
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.enabled", false)
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.pod_endpoint", "/inject-pod-cws")
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.command_endpoint", "/inject-command-cws")
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.include", []string{})
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.exclude", []string{})
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.mutate_unlabelled", true)
+ config.BindEnv("admission_controller.cws_instrumentation.container_registry")
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.image_name", "cws-instrumentation")
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.image_tag", "latest")
+ config.BindEnv("admission_controller.cws_instrumentation.init_resources.cpu")
+ config.BindEnv("admission_controller.cws_instrumentation.init_resources.memory")
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.mode", "remote_copy")
+ config.BindEnvAndSetDefault("admission_controller.cws_instrumentation.remote_copy.mount_volume", false)
+ config.BindEnvAndSetDefault("admission_controller.agent_sidecar.enabled", false)
+ config.BindEnvAndSetDefault("admission_controller.agent_sidecar.provider", "")
+ config.BindEnvAndSetDefault("admission_controller.agent_sidecar.endpoint", "/agentsidecar")
+ // The value should be parseable as a list of webhook selectors
+ config.BindEnvAndSetDefault("admission_controller.agent_sidecar.selectors", "[]")
+ // The value should be parseable as a list of env vars and resource limits
+ config.BindEnvAndSetDefault("admission_controller.agent_sidecar.profiles", "[]")
+ config.BindEnv("admission_controller.agent_sidecar.container_registry")
+ config.BindEnvAndSetDefault("admission_controller.agent_sidecar.image_name", "agent")
+ config.BindEnvAndSetDefault("admission_controller.agent_sidecar.image_tag", "latest")
+ config.BindEnvAndSetDefault("admission_controller.agent_sidecar.cluster_agent.enabled", "true")
+
+ // Telemetry
+ // Enable telemetry metrics on the internals of the Agent.
+ // This creates a lot of billable custom metrics.
+ config.BindEnvAndSetDefault("telemetry.enabled", false)
+ config.BindEnvAndSetDefault("telemetry.dogstatsd_origin", false)
+ config.BindEnvAndSetDefault("telemetry.python_memory", true)
+ config.BindEnv("telemetry.checks")
+ // We're using []string as a default instead of []float64 because viper can only parse lists of strings from the environment
+ //
+ // The histogram buckets used to track the time in nanoseconds DogStatsD listeners are not reading/waiting for new data
+ config.BindEnvAndSetDefault("telemetry.dogstatsd.listeners_latency_buckets", []string{})
+ // The histogram buckets used to track the time in nanoseconds it takes for the DogStatsD server to push data to the aggregator
+ config.BindEnvAndSetDefault("telemetry.dogstatsd.aggregator_channel_latency_buckets", []string{})
+ // The histogram buckets used to track the time in nanoseconds it takes for a DogStatsD listener to push data to the server
+ config.BindEnvAndSetDefault("telemetry.dogstatsd.listeners_channel_latency_buckets", []string{})
+
+ // Agent Telemetry. It is an experimental feature and is subject to change.
+ // It should not be enabled unless prompted by Datadog Support.
+ config.BindEnvAndSetDefault("agent_telemetry.enabled", false)
+
+ // Declare other keys that don't have a default/env var.
+ // Mostly, keys we use IsSet() on, because IsSet always returns true if a key has a default.
+ config.SetKnown("metadata_providers")
+ config.SetKnown("config_providers")
+ config.SetKnown("cluster_name")
+ config.SetKnown("listeners")
+ config.SetKnown("proxy.http")
+ config.SetKnown("proxy.https")
+ config.SetKnown("proxy.no_proxy")
+
+ // Orchestrator Explorer DCA and process-agent
+ config.BindEnvAndSetDefault("orchestrator_explorer.enabled", false)
+ // enables/disables scrubbing of environment variables & commands from the container specs
+ // this option can impact the CPU usage of the agent
+ config.BindEnvAndSetDefault("orchestrator_explorer.container_scrubbing.enabled", true)
+ config.BindEnvAndSetDefault("orchestrator_explorer.custom_sensitive_words", []string{})
+ config.BindEnvAndSetDefault("orchestrator_explorer.collector_discovery.enabled", true)
+ config.BindEnv("orchestrator_explorer.max_per_message")
+ config.BindEnv("orchestrator_explorer.max_message_bytes")
+ config.BindEnv("orchestrator_explorer.orchestrator_dd_url", "DD_ORCHESTRATOR_EXPLORER_ORCHESTRATOR_DD_URL", "DD_ORCHESTRATOR_URL")
+ config.BindEnv("orchestrator_explorer.orchestrator_additional_endpoints", "DD_ORCHESTRATOR_EXPLORER_ORCHESTRATOR_ADDITIONAL_ENDPOINTS", "DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS")
+ config.BindEnv("orchestrator_explorer.use_legacy_endpoint")
+ config.BindEnvAndSetDefault("orchestrator_explorer.manifest_collection.enabled", true)
+ config.BindEnvAndSetDefault("orchestrator_explorer.manifest_collection.buffer_manifest", true)
+ config.BindEnvAndSetDefault("orchestrator_explorer.manifest_collection.buffer_flush_interval", 20*time.Second)
+ config.BindEnvAndSetDefault("orchestrator_explorer.ecs_collection.enabled", false)
+
+ // Container lifecycle configuration
+ config.BindEnvAndSetDefault("container_lifecycle.enabled", true)
+ config.BindEnvAndSetDefault("container_lifecycle.ecs_task_event.enabled", false)
+ bindEnvAndSetLogsConfigKeys(config, "container_lifecycle.")
+
+ // Container image configuration
+ config.BindEnvAndSetDefault("container_image.enabled", true)
+ bindEnvAndSetLogsConfigKeys(config, "container_image.")
+
+ // Remote process collector
+ config.BindEnvAndSetDefault("workloadmeta.local_process_collector.collection_interval", DefaultLocalProcessCollectorInterval)
+
+ // SBOM configuration
+ config.BindEnvAndSetDefault("sbom.enabled", false)
+ bindEnvAndSetLogsConfigKeys(config, "sbom.")
+
+ config.BindEnvAndSetDefault("sbom.cache_directory", filepath.Join(defaultRunPath, "sbom-agent"))
+ config.BindEnvAndSetDefault("sbom.clear_cache_on_exit", false)
+ config.BindEnvAndSetDefault("sbom.cache.max_disk_size", 1000*1000*100) // used by custom cache: max disk space used by cached objects. Not equal to max disk usage
+ config.BindEnvAndSetDefault("sbom.cache.clean_interval", "1h") // used by custom cache.
+ config.BindEnvAndSetDefault("sbom.scan_queue.base_backoff", "5m")
+ config.BindEnvAndSetDefault("sbom.scan_queue.max_backoff", "1h")
+
+ // Container SBOM configuration
+ config.BindEnvAndSetDefault("sbom.container_image.enabled", false)
+ config.BindEnvAndSetDefault("sbom.container_image.use_mount", false)
+ config.BindEnvAndSetDefault("sbom.container_image.scan_interval", 0) // Integer seconds
+ config.BindEnvAndSetDefault("sbom.container_image.scan_timeout", 10*60) // Integer seconds
+ config.BindEnvAndSetDefault("sbom.container_image.analyzers", []string{"os"})
+ config.BindEnvAndSetDefault("sbom.container_image.check_disk_usage", true)
+ config.BindEnvAndSetDefault("sbom.container_image.min_available_disk", "1Gb")
+ config.BindEnvAndSetDefault("sbom.container_image.overlayfs_direct_scan", false)
+
+ // Host SBOM configuration
+ config.BindEnvAndSetDefault("sbom.host.enabled", false)
+ config.BindEnvAndSetDefault("sbom.host.analyzers", []string{"os"})
+
+ // Orchestrator Explorer - process agent
+ // DEPRECATED in favor of the `orchestrator_explorer.orchestrator_dd_url` setting. If both are set, `orchestrator_explorer.orchestrator_dd_url` takes precedence.
+ config.BindEnv("process_config.orchestrator_dd_url", "DD_PROCESS_CONFIG_ORCHESTRATOR_DD_URL", "DD_PROCESS_AGENT_ORCHESTRATOR_DD_URL")
+ // DEPRECATED in favor of the `orchestrator_explorer.orchestrator_additional_endpoints` setting. If both are set, `orchestrator_explorer.orchestrator_additional_endpoints` takes precedence.
+ config.SetKnown("process_config.orchestrator_additional_endpoints.*")
+ config.SetKnown("orchestrator_explorer.orchestrator_additional_endpoints.*")
+ config.BindEnvAndSetDefault("orchestrator_explorer.extra_tags", []string{})
+
+ // Network
+ config.BindEnv("network.id")
+
+ // inventories
+ config.BindEnvAndSetDefault("inventories_enabled", true)
+ config.BindEnvAndSetDefault("inventories_configuration_enabled", true) // controls the agent configurations
+ config.BindEnvAndSetDefault("inventories_checks_configuration_enabled", true) // controls the checks configurations
+ config.BindEnvAndSetDefault("inventories_collect_cloud_provider_account_id", true) // controls collection of `cloud_provider_account_id`
+ // when updating the default here also update pkg/metadata/inventories/README.md
+ config.BindEnvAndSetDefault("inventories_max_interval", 0) // 0 == default interval from inventories
+ config.BindEnvAndSetDefault("inventories_min_interval", 0) // 0 == default interval from inventories
+ // Seconds to wait before sending the metadata payload to the backend after startup
+ config.BindEnvAndSetDefault("inventories_first_run_delay", 60)
+
+ // Datadog security agent (common)
+ config.BindEnvAndSetDefault("security_agent.cmd_port", 5010)
+ config.BindEnvAndSetDefault("security_agent.expvar_port", 5011)
+ config.BindEnvAndSetDefault("security_agent.log_file", DefaultSecurityAgentLogFile)
+ config.BindEnvAndSetDefault("security_agent.remote_tagger", true)
+ config.BindEnvAndSetDefault("security_agent.remote_workloadmeta", false) // TODO: switch this to true when ready
+
+ // debug config to enable a remote client to receive data from the workloadmeta agent without a timeout
+ config.BindEnvAndSetDefault("workloadmeta.remote.recv_without_timeout", false)
+
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.enabled", false, "DD_SECURITY_AGENT_INTERNAL_PROFILING_ENABLED")
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.site", DefaultSite, "DD_SECURITY_AGENT_INTERNAL_PROFILING_SITE", "DD_SITE")
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.profile_dd_url", "", "DD_SECURITY_AGENT_INTERNAL_PROFILING_DD_URL", "DD_APM_INTERNAL_PROFILING_DD_URL")
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.api_key", "", "DD_SECURITY_AGENT_INTERNAL_PROFILING_API_KEY", "DD_API_KEY")
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.env", "", "DD_SECURITY_AGENT_INTERNAL_PROFILING_ENV", "DD_ENV")
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.period", 5*time.Minute, "DD_SECURITY_AGENT_INTERNAL_PROFILING_PERIOD")
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.cpu_duration", 1*time.Minute, "DD_SECURITY_AGENT_INTERNAL_PROFILING_CPU_DURATION")
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.mutex_profile_fraction", 0)
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.block_profile_rate", 0)
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.enable_goroutine_stacktraces", false)
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.enable_block_profiling", false)
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.enable_mutex_profiling", false)
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.delta_profiles", true)
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.unix_socket", "")
+ config.BindEnvAndSetDefault("security_agent.internal_profiling.extra_tags", []string{})
+
+ // Datadog security agent (compliance)
+ config.BindEnvAndSetDefault("compliance_config.enabled", false)
+ config.BindEnvAndSetDefault("compliance_config.xccdf.enabled", false) // deprecated, use host_benchmarks instead
+ config.BindEnvAndSetDefault("compliance_config.host_benchmarks.enabled", true)
+ config.BindEnvAndSetDefault("compliance_config.database_benchmarks.enabled", false)
+ config.BindEnvAndSetDefault("compliance_config.check_interval", 20*time.Minute)
+ config.BindEnvAndSetDefault("compliance_config.check_max_events_per_run", 100)
+ config.BindEnvAndSetDefault("compliance_config.dir", "/etc/datadog-agent/compliance.d")
+ config.BindEnv("compliance_config.run_commands_as")
+ bindEnvAndSetLogsConfigKeys(config, "compliance_config.endpoints.")
+ config.BindEnvAndSetDefault("compliance_config.metrics.enabled", false)
+ config.BindEnvAndSetDefault("compliance_config.opa.metrics.enabled", false)
+
+ // Datadog security agent (runtime)
+ config.BindEnvAndSetDefault("runtime_security_config.enabled", false)
+ if runtime.GOOS == "windows" {
+ config.BindEnvAndSetDefault("runtime_security_config.socket", "localhost:3334")
+ } else {
+ config.BindEnvAndSetDefault("runtime_security_config.socket", filepath.Join(InstallPath, "run/runtime-security.sock"))
+ }
+ config.BindEnvAndSetDefault("runtime_security_config.log_profiled_workloads", false)
+ config.BindEnvAndSetDefault("runtime_security_config.telemetry.ignore_dd_agent_containers", true)
+ config.BindEnvAndSetDefault("runtime_security_config.use_secruntime_track", false)
+ bindEnvAndSetLogsConfigKeys(config, "runtime_security_config.endpoints.")
+ bindEnvAndSetLogsConfigKeys(config, "runtime_security_config.activity_dump.remote_storage.endpoints.")
+
+ // Serverless Agent
+ config.SetDefault("serverless.enabled", false)
+ config.BindEnvAndSetDefault("serverless.logs_enabled", true)
+ config.BindEnvAndSetDefault("enhanced_metrics", true)
+ config.BindEnvAndSetDefault("capture_lambda_payload", false)
+ config.BindEnvAndSetDefault("capture_lambda_payload_max_depth", 10)
+ config.BindEnvAndSetDefault("serverless.trace_enabled", true, "DD_TRACE_ENABLED")
+ config.BindEnvAndSetDefault("serverless.trace_managed_services", true, "DD_TRACE_MANAGED_SERVICES")
+ config.BindEnvAndSetDefault("serverless.service_mapping", nil, "DD_SERVICE_MAPPING")
+
+ // trace-agent's evp_proxy
+ config.BindEnv("evp_proxy_config.enabled")
+ config.BindEnv("evp_proxy_config.dd_url")
+ config.BindEnv("evp_proxy_config.api_key")
+ config.BindEnv("evp_proxy_config.additional_endpoints")
+ config.BindEnv("evp_proxy_config.max_payload_size")
+
+ // command line options
+ config.SetKnown("cmd.check.fullsketches")
+
+ // Windows Performance Counter refresh interval in seconds (introduced in 7.40, narrowed down
+ // in 7.42). Additional information can be found where it is used (refreshPdhObjectCache())
+ // The refresh can be disabled by setting the interval to 0.
+ config.BindEnvAndSetDefault("windows_counter_refresh_interval", 60)
+
+ // Added in Agent version 7.42
+ // Limits the number of times a check will attempt to initialize a performance counter before ceasing
+ // attempts to initialize the counter. This allows the Agent to stop incurring the overhead of trying
+ // to initialize a counter that will probably never succeed. For example, when the performance counter
+ // database needs to be rebuilt or the counter is disabled.
+ // https://learn.microsoft.com/en-us/troubleshoot/windows-server/performance/manually-rebuild-performance-counters
+ //
+ // The value of this option should be chosen together with the windows_counter_refresh_interval option.
+ // The performance counter cache is refreshed during subsequent attempts to initialize a counter that failed
+ // the first time (with consideration of the windows_counter_refresh_interval value).
+ // It is unknown if it is possible for a counter that failed to initialize to later succeed without a refresh
+ // in between the attempts. Consequently, if windows_counter_refresh_interval is 0 (disabled), then this option should
+ // be 1. If this option is too small compared to the windows_counter_refresh_interval, it is possible to reach the limit
+ // before a refresh occurs. Typically there is one attempt per check run, and check runs are 15 seconds apart by default.
+ //
+ // Increasing this value may help in the rare instance where counters are not available for some time after host boot.
+ //
+ // Setting this option to 0 disables the limit and the Agent will attempt to initialize the counter forever.
+ // The default value of 20 means the Agent will retry counter initialization for roughly 5 minutes.
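+ // (Roughly 20 attempts at one attempt per 15-second check run works out to about 300 seconds of retries.)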
+ config.BindEnvAndSetDefault("windows_counter_init_failure_limit", 20)
+
+ // Vector integration
+ bindVectorOptions(config, Metrics)
+ bindVectorOptions(config, Logs)
+
+ // Datadog Agent Manager System Tray
+ config.BindEnvAndSetDefault("system_tray.log_file", "")
+
+ // Language Detection
+ config.BindEnvAndSetDefault("language_detection.enabled", false)
+ config.BindEnvAndSetDefault("language_detection.reporting.enabled", true)
+ // The buffer period controls how frequently the buffer of newly detected languages is flushed by reporting its content to the language detection handler in the cluster agent
+ config.BindEnvAndSetDefault("language_detection.reporting.buffer_period", "10s")
+ // The TTL refresh period controls how frequently actively detected languages are refreshed by reporting them again to the language detection handler in the cluster agent
+ config.BindEnvAndSetDefault("language_detection.reporting.refresh_period", "20m")
+
+ setupAPM(config)
+ OTLP(config)
+ setupProcesses(config)
+ setupMultiRegionFailover(config)
+
+ // Updater configuration
+ config.BindEnvAndSetDefault("updater.remote_updates", false)
+ config.BindEnv("updater.registry")
+ config.BindEnvAndSetDefault("updater.registry_auth", "")
+}
+
+// LoadProxyFromEnv overrides the proxy settings with environment variables
+func LoadProxyFromEnv(config pkgconfigmodel.Config) {
+ // Viper doesn't handle mixing nested variables from files with values set
+ // manually. If we manually set one of the sub-values for "proxy", all
+ // other values from the conf file will be shadowed when using
+ // 'config.Get("proxy")'. For that reason we first get the values from
+ // the conf files, overwrite them with the env variables and reset
+ // everything.
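+ // For example (illustrative): with proxy.http set in datadog.yaml and DD_PROXY_HTTPS exported, setting only
+ // "proxy.https" directly would hide the file-provided proxy.http from config.Get("proxy"), hence the
+ // read-then-overwrite-then-reset sequence below.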
+
+ // When the FIPS proxy is enabled we ignore the proxy settings to force data through the local proxy
+ if config.GetBool("fips.enabled") {
+ log.Infof("'fips.enabled' has been set to true. Ignoring proxy setting.")
+ return
+ }
+
+ lookupEnvCaseInsensitive := func(key string) (string, bool) {
+ value, found := os.LookupEnv(key)
+ if !found {
+ value, found = os.LookupEnv(strings.ToLower(key))
+ }
+ if found {
+ log.Infof("Found '%v' env var, using it for the Agent proxy settings", key)
+ }
+ return value, found
+ }
+
+ lookupEnv := func(key string) (string, bool) {
+ value, found := os.LookupEnv(key)
+ if found {
+ log.Infof("Found '%v' env var, using it for the Agent proxy settings", key)
+ }
+ return value, found
+ }
+
+ var isSet bool
+ p := &pkgconfigmodel.Proxy{}
+ if isSet = config.IsSet("proxy"); isSet {
+ if err := config.UnmarshalKey("proxy", p); err != nil {
+ isSet = false
+ log.Errorf("Could not load proxy setting from the configuration (ignoring): %s", err)
+ }
+ }
+
+ if HTTP, found := lookupEnv("DD_PROXY_HTTP"); found {
+ isSet = true
+ p.HTTP = HTTP
+ } else if HTTP, found := lookupEnvCaseInsensitive("HTTP_PROXY"); found {
+ isSet = true
+ p.HTTP = HTTP
+ }
+
+ if HTTPS, found := lookupEnv("DD_PROXY_HTTPS"); found {
+ isSet = true
+ p.HTTPS = HTTPS
+ } else if HTTPS, found := lookupEnvCaseInsensitive("HTTPS_PROXY"); found {
+ isSet = true
+ p.HTTPS = HTTPS
+ }
+
+ if noProxy, found := lookupEnv("DD_PROXY_NO_PROXY"); found {
+ isSet = true
+ p.NoProxy = strings.Split(noProxy, " ") // space-separated list, consistent with viper
+ } else if noProxy, found := lookupEnvCaseInsensitive("NO_PROXY"); found {
+ isSet = true
+ p.NoProxy = strings.Split(noProxy, ",") // comma-separated list, consistent with other tools that use the NO_PROXY env var
+ }
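+ // Illustrative examples of the two formats: DD_PROXY_NO_PROXY="10.0.0.0/8 internal.example.com" (space-separated)
+ // versus NO_PROXY="10.0.0.0/8,internal.example.com" (comma-separated).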
+
+ if !config.GetBool("use_proxy_for_cloud_metadata") {
+ log.Debugf("'use_proxy_for_cloud_metadata' is disabled: adding cloud provider URL to the no_proxy list")
+ isSet = true
+ p.NoProxy = append(p.NoProxy,
+ "169.254.169.254", // Azure, EC2, GCE
+ "100.100.100.200", // Alibaba
+ )
+ }
+
+ // We have to set each value individually so both config.Get("proxy")
+ // and config.Get("proxy.http") work
+ if isSet {
+ config.Set("proxy.http", p.HTTP, pkgconfigmodel.SourceEnvVar)
+ config.Set("proxy.https", p.HTTPS, pkgconfigmodel.SourceEnvVar)
+
+ // If this is set to an empty []string, viper will have a type conflict when merging
+ // this config during secrets resolution. It unmarshals empty yaml lists to type
+ // []interface{}, which will then conflict with type []string and fail to merge.
+ noProxy := make([]interface{}, len(p.NoProxy))
+ for idx := range p.NoProxy {
+ noProxy[idx] = p.NoProxy[idx]
+ }
+ config.Set("proxy.no_proxy", noProxy, pkgconfigmodel.SourceEnvVar)
+ }
+}
+
+// LoadWithoutSecret reads configs files, initializes the config module without decrypting any secrets
+func LoadWithoutSecret(config pkgconfigmodel.Config, additionalEnvVars []string) (*pkgconfigmodel.Warnings, error) {
+ return LoadDatadogCustom(config, "datadog.yaml", optional.NewNoneOption[secrets.Component](), additionalEnvVars)
+}
+
+// LoadWithSecret reads config files and initializes config with decrypted secrets
+func LoadWithSecret(config pkgconfigmodel.Config, secretResolver secrets.Component, additionalEnvVars []string) (*pkgconfigmodel.Warnings, error) {
+ return LoadDatadogCustom(config, "datadog.yaml", optional.NewOption[secrets.Component](secretResolver), additionalEnvVars)
+}
+
+// Merge will merge additional configuration into an existing configuration
+func Merge(configPaths []string, config pkgconfigmodel.Config) error {
+ for _, configPath := range configPaths {
+ if f, err := os.Open(configPath); err == nil {
+ err = config.MergeConfig(f)
+ _ = f.Close()
+ if err != nil {
+ return fmt.Errorf("error merging %s config file: %w", configPath, err)
+ }
+ } else {
+ log.Infof("no config exists at %s, ignoring...", configPath)
+ }
+ }
+
+ return nil
+}
+
+func findUnknownKeys(config pkgconfigmodel.Config) []string {
+ var unknownKeys []string
+ knownKeys := config.GetKnownKeysLowercased()
+ loadedKeys := config.AllKeysLowercased()
+ for _, key := range loadedKeys {
+ if _, found := knownKeys[key]; !found {
+ // Check if any subkey terminated with a '.*' wildcard is marked as known
+ // e.g.: apm_config.* would match all sub-keys of apm_config
+ splitPath := strings.Split(key, ".")
+ for j := range splitPath {
+ subKey := strings.Join(splitPath[:j+1], ".") + ".*"
+ if _, found = knownKeys[subKey]; found {
+ break
+ }
+ }
+ if !found {
+ unknownKeys = append(unknownKeys, key)
+ }
+ }
+ }
+ return unknownKeys
+}
+
+func findUnexpectedUnicode(config pkgconfigmodel.Config) []string {
+ messages := make([]string, 0)
+ checkAndRecordString := func(str string, prefix string) {
+ if res := FindUnexpectedUnicode(str); len(res) != 0 {
+ for _, detected := range res {
+ msg := fmt.Sprintf("%s - Unexpected unicode %s codepoint '%U' detected at byte position %v", prefix, detected.reason, detected.codepoint, detected.position)
+ messages = append(messages, msg)
+ }
+ }
+ }
+
+ var visitElement func(string, interface{})
+ visitElement = func(key string, element interface{}) {
+ switch elementValue := element.(type) {
+ case string:
+ checkAndRecordString(elementValue, fmt.Sprintf("For key '%s', configuration value string '%s'", key, elementValue))
+ case []string:
+ for _, s := range elementValue {
+ checkAndRecordString(s, fmt.Sprintf("For key '%s', configuration value string '%s'", key, s))
+ }
+ case []interface{}:
+ for _, listItem := range elementValue {
+ visitElement(key, listItem)
+ }
+ }
+ }
+
+ allKeys := config.AllKeysLowercased()
+ for _, key := range allKeys {
+ checkAndRecordString(key, fmt.Sprintf("Configuration key string '%s'", key))
+ if unknownValue := config.Get(key); unknownValue != nil {
+ visitElement(key, unknownValue)
+ }
+ }
+
+ return messages
+}
+
+func findUnknownEnvVars(config pkgconfigmodel.Config, environ []string, additionalKnownEnvVars []string) []string {
+ var unknownVars []string
+
+ knownVars := map[string]struct{}{
+ // these variables are used by the agent, but not via the Config struct,
+ // so must be listed separately.
+ "DD_INSIDE_CI": {},
+ "DD_PROXY_HTTP": {},
+ "DD_PROXY_HTTPS": {},
+ "DD_PROXY_NO_PROXY": {},
+ // these variables are used by serverless, but not via the Config struct
+ "DD_AAS_DOTNET_EXTENSION_VERSION": {},
+ "DD_AAS_EXTENSION_VERSION": {},
+ "DD_AAS_JAVA_EXTENSION_VERSION": {},
+ "DD_AGENT_PIPE_NAME": {},
+ "DD_API_KEY_SECRET_ARN": {},
+ "DD_APM_FLUSH_DEADLINE_MILLISECONDS": {},
+ "DD_APPSEC_ENABLED": {},
+ "DD_AZURE_APP_SERVICES": {},
+ "DD_DOGSTATSD_ARGS": {},
+ "DD_DOGSTATSD_PATH": {},
+ "DD_DOGSTATSD_WINDOWS_PIPE_NAME": {},
+ "DD_DOTNET_TRACER_HOME": {},
+ "DD_EXTENSION_PATH": {},
+ "DD_FLUSH_TO_LOG": {},
+ "DD_KMS_API_KEY": {},
+ "DD_INTEGRATIONS": {},
+ "DD_INTERNAL_NATIVE_LOADER_PATH": {},
+ "DD_INTERNAL_PROFILING_NATIVE_ENGINE_PATH": {},
+ "DD_LAMBDA_HANDLER": {},
+ "DD_LOGS_INJECTION": {},
+ "DD_MERGE_XRAY_TRACES": {},
+ "DD_PROFILER_EXCLUDE_PROCESSES": {},
+ "DD_PROFILING_LOG_DIR": {},
+ "DD_RUNTIME_METRICS_ENABLED": {},
+ "DD_SERVERLESS_APPSEC_ENABLED": {},
+ "DD_SERVERLESS_FLUSH_STRATEGY": {},
+ "DD_SERVICE": {},
+ "DD_TRACE_AGENT_ARGS": {},
+ "DD_TRACE_AGENT_PATH": {},
+ "DD_TRACE_AGENT_URL": {},
+ "DD_TRACE_LOG_DIRECTORY": {},
+ "DD_TRACE_LOG_PATH": {},
+ "DD_TRACE_METRICS_ENABLED": {},
+ "DD_TRACE_PIPE_NAME": {},
+ "DD_TRACE_TRANSPORT": {},
+ "DD_VERSION": {},
+ // this variable is used by CWS functional tests
+ "DD_TESTS_RUNTIME_COMPILED": {},
+ // this variable is used by the Kubernetes leader election mechanism
+ "DD_POD_NAME": {},
+ }
+ for _, key := range config.GetEnvVars() {
+ knownVars[key] = struct{}{}
+ }
+ for _, key := range additionalKnownEnvVars {
+ knownVars[key] = struct{}{}
+ }
+
+ for _, equality := range environ {
+ key := strings.SplitN(equality, "=", 2)[0]
+ if !strings.HasPrefix(key, "DD_") {
+ continue
+ }
+ if _, known := knownVars[key]; !known {
+ unknownVars = append(unknownVars, key)
+ }
+ }
+ return unknownVars
+}
+
+func useHostEtc(config pkgconfigmodel.Config) {
+ if pkgconfigenv.IsContainerized() && pathExists("/host/etc") {
+ if !config.GetBool("ignore_host_etc") {
+ if val, isSet := os.LookupEnv("HOST_ETC"); !isSet {
+ // We want to detect the host distro information instead of the container's.
+ // 'HOST_ETC' is used by some libraries like gopsutil and by the system-probe to
+ // download the right kernel headers.
+ os.Setenv("HOST_ETC", "/host/etc")
+ log.Debug("Setting environment variable HOST_ETC to '/host/etc'")
+ } else {
+ log.Debugf("'/host/etc' folder detected but HOST_ETC is already set to '%s', leaving it untouched", val)
+ }
+ } else {
+ log.Debug("/host/etc detected but ignored because 'ignore_host_etc' is set to true")
+ }
+ }
+}
+
+func checkConflictingOptions(config pkgconfigmodel.Config) error {
+ // Verify that use_podman_logs and docker_path_override are not both set since they conflict
+ if config.GetBool("logs_config.use_podman_logs") && len(config.GetString("logs_config.docker_path_override")) > 0 {
+ log.Warnf("'use_podman_logs' is set to true and 'docker_path_override' is set, please use one or the other")
+ return errors.New("'use_podman_logs' is set to true and 'docker_path_override' is set, please use one or the other")
+ }
+
+ return nil
+}
+
+// LoadDatadogCustom loads the datadog config in the given config
+func LoadDatadogCustom(config pkgconfigmodel.Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*pkgconfigmodel.Warnings, error) {
+ // Feature detection runs in a defer func as it always needs to run (whether config load has been successful or not),
+ // because some Agents (e.g. trace-agent) will run even if the config file does not exist.
+ defer func() {
+ // Environment feature detection needs to run before applying override funcs
+ // as it may provide such overrides
+ pkgconfigenv.DetectFeatures(config)
+ pkgconfigmodel.ApplyOverrideFuncs(config)
+ }()
+
+ warnings, err := LoadCustom(config, origin, secretResolver, additionalKnownEnvVars)
+ if err != nil {
+ if errors.Is(err, os.ErrPermission) {
+ log.Warnf("Error loading config: %v (check config file permissions for dd-agent user)", err)
+ } else {
+ log.Warnf("Error loading config: %v", err)
+ }
+ return warnings, err
+ }
+
+ err = checkConflictingOptions(config)
+ if err != nil {
+ return warnings, err
+ }
+
+ // If this variable is set to true, we'll use DefaultPython for the Python version,
+ // ignoring the python_version configuration value.
+ if ForceDefaultPython == "true" && config.IsKnown("python_version") {
+ pv := config.GetString("python_version")
+ if pv != DefaultPython {
+ log.Warnf("Python version has been forced to %s", DefaultPython)
+ }
+
+ pkgconfigmodel.AddOverride("python_version", DefaultPython)
+ }
+
+ sanitizeAPIKeyConfig(config, "api_key")
+ sanitizeAPIKeyConfig(config, "logs_config.api_key")
+ // setTracemallocEnabled *must* be called before setNumWorkers
+ warnings.TraceMallocEnabledWithPy2 = setTracemallocEnabled(config)
+ setNumWorkers(config)
+ return warnings, setupFipsEndpoints(config)
+}
+
+// LoadCustom reads config into the provided config object
+func LoadCustom(config pkgconfigmodel.Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*pkgconfigmodel.Warnings, error) {
+ warnings := pkgconfigmodel.Warnings{}
+
+ if err := config.ReadInConfig(); err != nil {
+ if pkgconfigenv.IsServerless() {
+ log.Debug("No config file detected, using environment variable based configuration only")
+ // Proxy settings need to be loaded from environment variables even in the absence of a datadog.yaml file
+ // The remaining code in LoadCustom is not run to keep a low cold start time
+ LoadProxyFromEnv(config)
+ return &warnings, nil
+ }
+ return &warnings, err
+ }
+
+ for _, key := range findUnknownKeys(config) {
+ log.Warnf("Unknown key in config file: %v", key)
+ }
+
+ for _, v := range findUnknownEnvVars(config, os.Environ(), additionalKnownEnvVars) {
+ log.Warnf("Unknown environment variable: %v", v)
+ }
+
+ for _, warningMsg := range findUnexpectedUnicode(config) {
+ log.Warnf(warningMsg)
+ }
+
+ // We resolve proxy settings before secrets. This allows setting secrets through DD_PROXY_* env variables
+ LoadProxyFromEnv(config)
+
+ if resolver, ok := secretResolver.Get(); ok {
+ if err := ResolveSecrets(config, resolver, origin); err != nil {
+ return &warnings, err
+ }
+ }
+
+ // Verify 'DD_URL' and 'DD_DD_URL' conflicts
+ if EnvVarAreSetAndNotEqual("DD_DD_URL", "DD_URL") {
+ log.Warnf("'DD_URL' and 'DD_DD_URL' variables are both set in environment. Using 'DD_DD_URL' value")
+ }
+
+ useHostEtc(config)
+ return &warnings, nil
+}
+
+// setupFipsEndpoints overwrites the Agent endpoint for outgoing data to be sent to the local FIPS proxy. The local FIPS
+// proxy will be in charge of forwarding data to the Datadog backend following FIPS standard. Starting from
+// fips.port_range_start we will assign a dedicated port per product (metrics, logs, traces, ...).
+func setupFipsEndpoints(config pkgconfigmodel.Config) error {
+ // Each port is dedicated to a specific data type:
+ //
+ // port_range_start: HAProxy stats
+ // port_range_start + 1: metrics
+ // port_range_start + 2: traces
+ // port_range_start + 3: profiles
+ // port_range_start + 4: processes
+ // port_range_start + 5: logs
+ // port_range_start + 6: databases monitoring metrics, metadata and activity
+ // port_range_start + 7: databases monitoring samples
+ // port_range_start + 8: network devices metadata
+ // port_range_start + 9: network devices snmp traps
+ // port_range_start + 10: instrumentation telemetry
+ // port_range_start + 11: appsec events (unused)
+ // port_range_start + 12: orchestrator explorer
+ // port_range_start + 13: runtime security
+ // port_range_start + 14: compliance
+ // port_range_start + 15: network devices netflow
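+ // For example, with fips.port_range_start set to 9803 (an illustrative value), metrics would be sent to
+ // <fips.local_address>:9804, traces to :9805 and logs to :9808.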
+
+ if !config.GetBool("fips.enabled") {
+ log.Debug("FIPS mode is disabled")
+ return nil
+ }
+
+ const (
+ proxyStats = 0
+ metrics = 1
+ traces = 2
+ profiles = 3
+ processes = 4
+ logs = 5
+ databasesMonitoringMetrics = 6
+ databasesMonitoringSamples = 7
+ networkDevicesMetadata = 8
+ networkDevicesSnmpTraps = 9
+ instrumentationTelemetry = 10
+ appsecEvents = 11
+ orchestratorExplorer = 12
+ runtimeSecurity = 13
+ compliance = 14
+ networkDevicesNetflow = 15
+ )
+
+ localAddress, err := system.IsLocalAddress(config.GetString("fips.local_address"))
+ if err != nil {
+ return fmt.Errorf("fips.local_address: %s", err)
+ }
+
+ portRangeStart := config.GetInt("fips.port_range_start")
+ urlFor := func(port int) string { return net.JoinHostPort(localAddress, strconv.Itoa(portRangeStart+port)) }
+
+ log.Warnf("FIPS mode is enabled! All communication to DataDog will be routed to the local FIPS proxy on '%s' starting from port %d", localAddress, portRangeStart)
+
+ // Disabling proxy to make sure all data goes directly to the FIPS proxy
+ os.Unsetenv("HTTP_PROXY")
+ os.Unsetenv("HTTPS_PROXY")
+
+ config.Set("fips.https", config.GetBool("fips.https"), pkgconfigmodel.SourceAgentRuntime)
+
+ // HTTP for now, will soon be updated to HTTPS
+ protocol := "http://"
+ if config.GetBool("fips.https") {
+ protocol = "https://"
+ config.Set("skip_ssl_validation", !config.GetBool("fips.tls_verify"), pkgconfigmodel.SourceAgentRuntime)
+ }
+
+ // The following overwrites should be kept in sync with the documentation for the fips.enabled config setting in the
+ // config_template.yaml
+
+ // Metrics
+ config.Set("dd_url", protocol+urlFor(metrics), pkgconfigmodel.SourceAgentRuntime)
+
+ // Logs
+ setupFipsLogsConfig(config, "logs_config.", urlFor(logs))
+
+ // APM
+ config.Set("apm_config.apm_dd_url", protocol+urlFor(traces), pkgconfigmodel.SourceAgentRuntime)
+ // Adding "/api/v2/profile" because it's not added to the 'apm_config.profiling_dd_url' value by the Agent
+ config.Set("apm_config.profiling_dd_url", protocol+urlFor(profiles)+"/api/v2/profile", pkgconfigmodel.SourceAgentRuntime)
+ config.Set("apm_config.telemetry.dd_url", protocol+urlFor(instrumentationTelemetry), pkgconfigmodel.SourceAgentRuntime)
+
+ // Processes
+ config.Set("process_config.process_dd_url", protocol+urlFor(processes), pkgconfigmodel.SourceAgentRuntime)
+
+ // Database monitoring
+ // Historically we used a different port for samples because the intake hostname defined in epforwarder.go was different
+ // (even though the underlying IPs were the same as the ones behind the DBM metrics intake hostname). We're keeping 2 ports for backward compatibility reasons.
+ setupFipsLogsConfig(config, "database_monitoring.metrics.", urlFor(databasesMonitoringMetrics))
+ setupFipsLogsConfig(config, "database_monitoring.activity.", urlFor(databasesMonitoringMetrics))
+ setupFipsLogsConfig(config, "database_monitoring.samples.", urlFor(databasesMonitoringSamples))
+
+ // Network devices
+ // Internally, Viper uses multiple stores for the configuration values: values from datadog.yaml are stored
+ // in a different place from overrides (created with config.Set(...)).
+ // Some NDM products use UnmarshalKey(), which reads either the overridden data or the configuration file data but not
+ // both at the same time (see https://github.com/spf13/viper/issues/1106)
+ //
+ // Because of that we need to put all the NDM config in the overridden data store (using Set) in order to get
+ // data from the config + data created by the FIPS mode when using UnmarshalKey()
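+ // For example (illustrative): snmp_traps settings read from datadog.yaml and the forwarder URL set below at
+ // runtime would otherwise live in two different stores, and UnmarshalKey("network_devices.snmp_traps", ...)
+ // would only see one of them; copying the file values into the override store with Set makes both visible.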
+
+ config.Set("network_devices.snmp_traps", config.Get("network_devices.snmp_traps"), pkgconfigmodel.SourceAgentRuntime)
+ setupFipsLogsConfig(config, "network_devices.metadata.", urlFor(networkDevicesMetadata))
+ config.Set("network_devices.netflow", config.Get("network_devices.netflow"), pkgconfigmodel.SourceAgentRuntime)
+ setupFipsLogsConfig(config, "network_devices.snmp_traps.forwarder.", urlFor(networkDevicesSnmpTraps))
+ setupFipsLogsConfig(config, "network_devices.netflow.forwarder.", urlFor(networkDevicesNetflow))
+
+ // Orchestrator Explorer
+ config.Set("orchestrator_explorer.orchestrator_dd_url", protocol+urlFor(orchestratorExplorer), pkgconfigmodel.SourceAgentRuntime)
+
+ // CWS
+ setupFipsLogsConfig(config, "runtime_security_config.endpoints.", urlFor(runtimeSecurity))
+
+ // Compliance
+ setupFipsLogsConfig(config, "compliance_config.endpoints.", urlFor(compliance))
+
+ return nil
+}
+
+func setupFipsLogsConfig(config pkgconfigmodel.Config, configPrefix string, url string) {
+ config.Set(configPrefix+"use_http", true, pkgconfigmodel.SourceAgentRuntime)
+ config.Set(configPrefix+"logs_no_ssl", !config.GetBool("fips.https"), pkgconfigmodel.SourceAgentRuntime)
+ config.Set(configPrefix+"logs_dd_url", url, pkgconfigmodel.SourceAgentRuntime)
+}
+
+// ResolveSecrets merges all the secret values from origin into config. Secret values
+// are identified by a value of the form "ENC[key]" where key is the secret key.
+// See: https://github.com/DataDog/datadog-agent/blob/main/docs/agent/secrets.md
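+// For example (illustrative): a datadog.yaml entry such as `api_key: ENC[prod_api_key]` causes the configured
+// secret_backend_command to be invoked with the "prod_api_key" handle, and the value it returns replaces the
+// ENC[...] placeholder in the in-memory configuration.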
+func ResolveSecrets(config pkgconfigmodel.Config, secretResolver secrets.Component, origin string) error {
+ // We have to init the secrets package before we can use it to decrypt
+ // anything.
+ secretResolver.Configure(secrets.ConfigParams{
+ Command: config.GetString("secret_backend_command"),
+ Arguments: config.GetStringSlice("secret_backend_arguments"),
+ Timeout: config.GetInt("secret_backend_timeout"),
+ MaxSize: config.GetInt("secret_backend_output_max_size"),
+ RefreshInterval: config.GetInt("secret_refresh_interval"),
+ GroupExecPerm: config.GetBool("secret_backend_command_allow_group_exec_perm"),
+ RemoveLinebreak: config.GetBool("secret_backend_remove_trailing_line_break"),
+ RunPath: config.GetString("run_path"),
+ AuditFileMaxSize: config.GetInt("secret_audit_file_max_size"),
+ })
+
+ if config.GetString("secret_backend_command") != "" {
+ // Viper doesn't expose the final location of the file it
+ // loads. Since we are searching for 'datadog.yaml' in multiple
+ // locations we let viper determine the one to use before
+ // updating it.
+ yamlConf, err := yaml.Marshal(config.AllSettings())
+ if err != nil {
+ return fmt.Errorf("unable to marshal configuration to YAML to decrypt secrets: %v", err)
+ }
+
+ secretResolver.SubscribeToChanges(func(handle, settingOrigin string, settingPath []string, oldValue, newValue any) {
+ if origin != settingOrigin {
+ return
+ }
+ if err := configAssignAtPath(config, settingPath, newValue); err != nil {
+ log.Errorf("could not assign to config: %s", err)
+ }
+ })
+ if _, err = secretResolver.Resolve(yamlConf, origin); err != nil {
+ return fmt.Errorf("unable to decrypt secret from datadog.yaml: %v", err)
+ }
+ }
+ return nil
+}
+
+// configAssignAtPath assigns a value to the given setting of the config
+// This works around viper issues that prevent us from assigning to fields that have a dot in the
+// name (example: 'additional_endpoints.http://url.com') and also allows us to assign to individual
+// elements of a slice of items (example: 'proxy.no_proxy.0' to assign index 0 of 'no_proxy')
+func configAssignAtPath(config pkgconfigmodel.Config, settingPath []string, newValue any) error {
+ settingName := strings.Join(settingPath, ".")
+ if config.IsKnown(settingName) {
+ config.Set(settingName, newValue, pkgconfigmodel.SourceAgentRuntime)
+ return nil
+ }
+
+ // Trying to assign to an unknown config field can happen when trying to set a
+ // value inside of a compound object (a slice or a map) which allows arbitrary key
+ // values. Some settings where this happens include `additional_endpoints`, or
+ // `kubernetes_node_annotations_as_tags`, etc. Since these arbitrary keys can
+ // contain a '.' character, we are unable to use the standard `config.Set` method.
+ // Instead, we remove trailing elements from the end of the path until we find a known
+ // config field, retrieve the compound object at that point, and then use the trailing
+ // elements to figure out how to modify that particular object, before setting it back
+ // on the config.
+ //
+ // Example with the following configuration:
+ //
+ // process_config:
+ // additional_endpoints:
+ // http://url.com:
+ // - ENC[handle_to_password]
+ //
+ // Calling this function like:
+ //
+ // configAssignAtPath(config, ['process_config', 'additional_endpoints', 'http://url.com', '0'], 'password')
+ //
+ // This is split into:
+ // ['process_config', 'additional_endpoints'] // a known config field
+ // and:
+ // ['http://url.com', '0'] // trailing elements
+ //
+ // This function will effectively do:
+ //
+ // var original map[string][]string = config.Get('process_config.additional_endpoints')
+ // var slice []string = original['http://url.com']
+ // slice[0] = 'password'
+ // config.Set('process_config.additional_endpoints', original)
+
+ trailingElements := make([]string, 0, len(settingPath))
+ // copy the path and hold onto the original, useful for error messages
+ path := slices.Clone(settingPath)
+ for {
+ if len(path) == 0 {
+ return fmt.Errorf("unknown config setting '%s'", settingPath)
+ }
+ // get the last element from the path and add it to the trailing elements
+ lastElem := path[len(path)-1]
+ trailingElements = append(trailingElements, lastElem)
+ // remove that element from the path and see if we've reached a known field
+ path = path[:len(path)-1]
+ settingName = strings.Join(path, ".")
+ if config.IsKnown(settingName) {
+ break
+ }
+ }
+ slices.Reverse(trailingElements)
+
+ // retrieve the config value at the known field
+ startingValue := config.Get(settingName)
+ iterateValue := startingValue
+ // iterate down until we find the final object that we are able to modify
+ for k, elem := range trailingElements {
+ switch modifyValue := iterateValue.(type) {
+ case map[string]interface{}:
+ if k == len(trailingElements)-1 {
+ // if we reached the final object, modify it directly by assigning the newValue parameter
+ modifyValue[elem] = newValue
+ } else {
+ // otherwise iterate inside that compound object
+ iterateValue = modifyValue[elem]
+ }
+ case map[interface{}]interface{}:
+ if k == len(trailingElements)-1 {
+ modifyValue[elem] = newValue
+ } else {
+ iterateValue = modifyValue[elem]
+ }
+ case []string:
+ index, err := strconv.Atoi(elem)
+ if err != nil {
+ return err
+ }
+ if index >= len(modifyValue) {
+ return fmt.Errorf("index out of range %d >= %d", index, len(modifyValue))
+ }
+ if k == len(trailingElements)-1 {
+ modifyValue[index] = fmt.Sprintf("%s", newValue)
+ } else {
+ iterateValue = modifyValue[index]
+ }
+ case []interface{}:
+ index, err := strconv.Atoi(elem)
+ if err != nil {
+ return err
+ }
+ if index >= len(modifyValue) {
+ return fmt.Errorf("index out of range %d >= %d", index, len(modifyValue))
+ }
+ if k == len(trailingElements)-1 {
+ modifyValue[index] = newValue
+ } else {
+ iterateValue = modifyValue[index]
+ }
+ default:
+ return fmt.Errorf("cannot assign to setting '%s' of type %T", settingPath, iterateValue)
+ }
+ }
+
+ config.Set(settingName, startingValue, pkgconfigmodel.SourceAgentRuntime)
+ return nil
+}
+
+// EnvVarAreSetAndNotEqual returns true if two given variables are set in environment and are not equal.
+func EnvVarAreSetAndNotEqual(lhsName string, rhsName string) bool {
+ lhsValue, lhsIsSet := os.LookupEnv(lhsName)
+ rhsValue, rhsIsSet := os.LookupEnv(rhsName)
+
+ return lhsIsSet && rhsIsSet && lhsValue != rhsValue
+}
+
+// sanitizeAPIKeyConfig strips newlines and other control characters from a given key.
+func sanitizeAPIKeyConfig(config pkgconfigmodel.Config, key string) {
+ if !config.IsKnown(key) || !config.IsSet(key) {
+ return
+ }
+ config.Set(key, strings.TrimSpace(config.GetString(key)), pkgconfigmodel.SourceAgentRuntime)
+}
+
+// sanitizeExternalMetricsProviderChunkSize ensures the value of `external_metrics_provider.chunk_size` is within an acceptable range
+func sanitizeExternalMetricsProviderChunkSize(config pkgconfigmodel.Config) {
+ if !config.IsKnown("external_metrics_provider.chunk_size") {
+ return
+ }
+
+ chunkSize := config.GetInt("external_metrics_provider.chunk_size")
+ if chunkSize <= 0 {
+ log.Warnf("external_metrics_provider.chunk_size cannot be negative: %d", chunkSize)
+ config.Set("external_metrics_provider.chunk_size", 1, pkgconfigmodel.SourceAgentRuntime)
+ }
+ if chunkSize > maxExternalMetricsProviderChunkSize {
+ log.Warnf("external_metrics_provider.chunk_size has been set to %d, which is higher than the maximum allowed value %d. Using %d.", chunkSize, maxExternalMetricsProviderChunkSize, maxExternalMetricsProviderChunkSize)
+ config.Set("external_metrics_provider.chunk_size", maxExternalMetricsProviderChunkSize, pkgconfigmodel.SourceAgentRuntime)
+ }
+}
+
+func bindEnvAndSetLogsConfigKeys(config pkgconfigmodel.Config, prefix string) {
+ config.BindEnv(prefix + "logs_dd_url") // Send the logs to a proxy. Must respect the format '<HOST>:<PORT>', where '<PORT>' is an integer
+ config.BindEnv(prefix + "dd_url")
+ config.BindEnv(prefix + "additional_endpoints")
+ config.BindEnvAndSetDefault(prefix+"use_compression", true)
+ config.BindEnvAndSetDefault(prefix+"compression_level", 6) // Default level for the gzip/deflate algorithm
+ config.BindEnvAndSetDefault(prefix+"batch_wait", DefaultBatchWait)
+ config.BindEnvAndSetDefault(prefix+"connection_reset_interval", 0) // in seconds, 0 means disabled
+ config.BindEnvAndSetDefault(prefix+"logs_no_ssl", false)
+ config.BindEnvAndSetDefault(prefix+"batch_max_concurrent_send", DefaultBatchMaxConcurrentSend)
+ config.BindEnvAndSetDefault(prefix+"batch_max_content_size", DefaultBatchMaxContentSize)
+ config.BindEnvAndSetDefault(prefix+"batch_max_size", DefaultBatchMaxSize)
+ config.BindEnvAndSetDefault(prefix+"input_chan_size", DefaultInputChanSize) // Only used by EP Forwarder for now, not used by logs
+ config.BindEnvAndSetDefault(prefix+"sender_backoff_factor", DefaultLogsSenderBackoffFactor)
+ config.BindEnvAndSetDefault(prefix+"sender_backoff_base", DefaultLogsSenderBackoffBase)
+ config.BindEnvAndSetDefault(prefix+"sender_backoff_max", DefaultLogsSenderBackoffMax)
+ config.BindEnvAndSetDefault(prefix+"sender_recovery_interval", DefaultForwarderRecoveryInterval)
+ config.BindEnvAndSetDefault(prefix+"sender_recovery_reset", false)
+ config.BindEnvAndSetDefault(prefix+"use_v2_api", true)
+ config.SetKnown(prefix + "dev_mode_no_ssl")
+}
+
+// IsCloudProviderEnabled checks the cloud provider family provided in
+// pkg/util/<cloud_provider>.go against the value for cloud_provider: on the
+// global config object Datadog
+func IsCloudProviderEnabled(cloudProviderName string, config pkgconfigmodel.Reader) bool {
+ cloudProviderFromConfig := config.GetStringSlice("cloud_provider_metadata")
+
+ for _, cloudName := range cloudProviderFromConfig {
+ if strings.EqualFold(cloudName, cloudProviderName) {
+ log.Debugf("cloud_provider_metadata is set to %s in agent configuration, trying endpoints for %s Cloud Provider",
+ cloudProviderFromConfig,
+ cloudProviderName)
+ return true
+ }
+ }
+
+ log.Debugf("cloud_provider_metadata is set to %s in agent configuration, skipping %s Cloud Provider",
+ cloudProviderFromConfig,
+ cloudProviderName)
+ return false
+}
+
+// pathExists returns true if the given path exists
+func pathExists(path string) bool {
+ _, err := os.Stat(path)
+ return !os.IsNotExist(err)
+}
+
+// setTracemallocEnabled is a helper to get the effective tracemalloc
+// configuration.
+func setTracemallocEnabled(config pkgconfigmodel.Config) bool {
+ if !config.IsKnown("tracemalloc_debug") {
+ return false
+ }
+
+ pyVersion := config.GetString("python_version")
+ wTracemalloc := config.GetBool("tracemalloc_debug")
+ traceMallocEnabledWithPy2 := false
+ if pyVersion == "2" && wTracemalloc {
+ log.Warnf("Tracemalloc was enabled but unavailable with python version %q, disabling.", pyVersion)
+ wTracemalloc = false
+ traceMallocEnabledWithPy2 = true
+ }
+
+ // update config with the actual effective tracemalloc
+ config.Set("tracemalloc_debug", wTracemalloc, pkgconfigmodel.SourceAgentRuntime)
+ return traceMallocEnabledWithPy2
+}
+
+// setNumWorkers is a helper to set the effective number of workers for
+// a given config.
+func setNumWorkers(config pkgconfigmodel.Config) {
+ if !config.IsKnown("check_runners") {
+ return
+ }
+
+ wTracemalloc := config.GetBool("tracemalloc_debug")
+ numWorkers := config.GetInt("check_runners")
+ if wTracemalloc {
+ log.Infof("Tracemalloc enabled, only one check runner enabled to run checks serially")
+ numWorkers = 1
+ }
+
+ // update config with the actual effective number of workers
+ config.Set("check_runners", numWorkers, pkgconfigmodel.SourceAgentRuntime)
+}
+
+// GetDogstatsdMappingProfiles returns mapping profiles used in DogStatsD mapper
+func GetDogstatsdMappingProfiles(config pkgconfigmodel.Reader) ([]MappingProfile, error) {
+ return getDogstatsdMappingProfilesConfig(config)
+}
+
+func getDogstatsdMappingProfilesConfig(config pkgconfigmodel.Reader) ([]MappingProfile, error) {
+ var mappings []MappingProfile
+ if config.IsSet("dogstatsd_mapper_profiles") {
+ err := config.UnmarshalKey("dogstatsd_mapper_profiles", &mappings)
+ if err != nil {
+ return []MappingProfile{}, log.Errorf("Could not parse dogstatsd_mapper_profiles: %v", err)
+ }
+ }
+ return mappings, nil
+}
+
+// IsCLCRunner returns whether the Agent is in cluster check runner mode
+func IsCLCRunner(config pkgconfigmodel.Reader) bool {
+ if !config.GetBool("clc_runner_enabled") {
+ return false
+ }
+
+ var cps []ConfigurationProviders
+ if err := config.UnmarshalKey("config_providers", &cps); err != nil {
+ return false
+ }
+
+ for _, name := range config.GetStringSlice("extra_config_providers") {
+ cps = append(cps, ConfigurationProviders{Name: name})
+ }
+
+ // A cluster check runner is an Agent configured to run clusterchecks only
+ // We want exactly one ConfigProvider named clusterchecks
+ if len(cps) == 0 {
+ return false
+ }
+
+ for _, cp := range cps {
+ if cp.Name != "clusterchecks" {
+ return false
+ }
+ }
+
+ return true
+}
+
+// GetBindHost returns `bind_host` variable or default value
+// Not using `config.BindEnvAndSetDefault` as some processes need to know
+// whether the value was the default one or not (e.g. trace-agent)
+func GetBindHost(config pkgconfigmodel.Reader) string {
+ return GetBindHostFromConfig(config)
+}
+
+// GetBindHostFromConfig returns the bind_host value from the config
+func GetBindHostFromConfig(cfg pkgconfigmodel.Reader) string {
+ if cfg.IsSet("bind_host") {
+ return cfg.GetString("bind_host")
+ }
+ return "localhost"
+}
+
+// GetValidHostAliases validates host aliases set in `host_aliases` variable and returns
+// only valid ones.
+func GetValidHostAliases(_ context.Context, config pkgconfigmodel.Reader) ([]string, error) {
+ return getValidHostAliasesWithConfig(config), nil
+}
+
+func getValidHostAliasesWithConfig(config pkgconfigmodel.Reader) []string {
+ aliases := []string{}
+ for _, alias := range config.GetStringSlice("host_aliases") {
+ if err := validate.ValidHostname(alias); err == nil {
+ aliases = append(aliases, alias)
+ } else {
+ log.Warnf("skipping invalid host alias '%s': %s", alias, err)
+ }
+ }
+
+ return aliases
+}
+
+func bindVectorOptions(config pkgconfigmodel.Config, datatype DataType) {
+ config.BindEnvAndSetDefault(fmt.Sprintf("observability_pipelines_worker.%s.enabled", datatype), false)
+ config.BindEnvAndSetDefault(fmt.Sprintf("observability_pipelines_worker.%s.url", datatype), "")
+
+ config.BindEnvAndSetDefault(fmt.Sprintf("vector.%s.enabled", datatype), false)
+ config.BindEnvAndSetDefault(fmt.Sprintf("vector.%s.url", datatype), "")
+}
+
+// GetObsPipelineURL returns the URL under the 'observability_pipelines_worker.' prefix for the given datatype
+func GetObsPipelineURL(datatype DataType, config pkgconfigmodel.Reader) (string, error) {
+ if config.GetBool(fmt.Sprintf("observability_pipelines_worker.%s.enabled", datatype)) {
+ return getObsPipelineURLForPrefix(datatype, "observability_pipelines_worker", config)
+ } else if config.GetBool(fmt.Sprintf("vector.%s.enabled", datatype)) {
+ // Fallback to the `vector` config if observability_pipelines_worker is not set.
+ return getObsPipelineURLForPrefix(datatype, "vector", config)
+ }
+ return "", nil
+}
+
+func getObsPipelineURLForPrefix(datatype DataType, prefix string, config pkgconfigmodel.Reader) (string, error) {
+ if config.GetBool(fmt.Sprintf("%s.%s.enabled", prefix, datatype)) {
+ pipelineURL := config.GetString(fmt.Sprintf("%s.%s.url", prefix, datatype))
+ if pipelineURL == "" {
+ log.Errorf("%s.%s.enabled is set to true, but %s.%s.url is empty", prefix, datatype, prefix, datatype)
+ return "", nil
+ }
+ _, err := url.Parse(pipelineURL)
+ if err != nil {
+ return "", fmt.Errorf("could not parse %s %s endpoint: %s", prefix, datatype, err)
+ }
+ return pipelineURL, nil
+ }
+ return "", nil
+}
+
+// IsRemoteConfigEnabled returns true if Remote Configuration should be enabled
+func IsRemoteConfigEnabled(cfg pkgconfigmodel.Reader) bool {
+ // Disable Remote Config for GovCloud
+ if cfg.GetBool("fips.enabled") || cfg.GetString("site") == "ddog-gov.com" {
+ return false
+ }
+ return cfg.GetBool("remote_configuration.enabled")
+}
+
+// GetRemoteConfigurationAllowedIntegrations returns the list of integrations that can be scheduled
+// with remote-config
+func GetRemoteConfigurationAllowedIntegrations(cfg pkgconfigmodel.Reader) map[string]bool {
+ allowList := cfg.GetStringSlice("remote_configuration.agent_integrations.allow_list")
+ allowMap := map[string]bool{}
+ for _, integration := range allowList {
+ allowMap[strings.ToLower(integration)] = true
+ }
+
+ blockList := cfg.GetStringSlice("remote_configuration.agent_integrations.block_list")
+ for _, blockedIntegration := range blockList {
+ allowMap[strings.ToLower(blockedIntegration)] = false
+ }
+
+ return allowMap
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config_darwin.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config_darwin.go
new file mode 100644
index 0000000000..7c1dde6728
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config_darwin.go
@@ -0,0 +1,33 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package setup
+
+const (
+ defaultConfdPath = "/opt/datadog-agent/etc/conf.d"
+ defaultAdditionalChecksPath = "/opt/datadog-agent/etc/checks.d"
+ defaultRunPath = "/opt/datadog-agent/run"
+ defaultGuiPort = 5002
+ // DefaultUpdaterLogFile is the default updater log file
+ DefaultUpdaterLogFile = "/opt/datadog-agent/logs/updater.log"
+ // DefaultSecurityAgentLogFile points to the log file that will be used by the security-agent if not configured
+ DefaultSecurityAgentLogFile = "/opt/datadog-agent/logs/security-agent.log"
+ // DefaultProcessAgentLogFile is the default process-agent log file
+ DefaultProcessAgentLogFile = "/opt/datadog-agent/logs/process-agent.log"
+ // defaultSystemProbeAddress is the default unix socket path to be used for connecting to the system probe
+ defaultSystemProbeAddress = "/opt/datadog-agent/run/sysprobe.sock"
+ // defaultEventMonitorAddress is the default unix socket path to be used for connecting to the event monitor
+ defaultEventMonitorAddress = "/opt/datadog-agent/run/event-monitor.sock"
+ defaultSystemProbeLogFilePath = "/opt/datadog-agent/logs/system-probe.log"
+ // DefaultDDAgentBin the process agent's binary
+ DefaultDDAgentBin = "/opt/datadog-agent/bin/agent/agent"
+ // InstallPath is the default install path for the agent
+ InstallPath = "/opt/datadog-agent"
+)
+
+// called by init in config.go, to ensure any os-specific config is done
+// in time
+func osinit() {
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config_nix.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config_nix.go
new file mode 100644
index 0000000000..7ef3114861
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config_nix.go
@@ -0,0 +1,46 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux || freebsd || netbsd || openbsd || solaris || dragonfly || aix
+
+package setup
+
+import "path/filepath"
+
+// Variables to initialize at build time
+var (
+ // InstallPath is the default install path for the agent
+ // It might be overridden at build time
+ InstallPath = "/opt/datadog-agent"
+)
+
+var (
+ defaultRunPath = filepath.Join(InstallPath, "run")
+ // defaultSystemProbeAddress is the default unix socket path to be used for connecting to the system probe
+ defaultSystemProbeAddress = filepath.Join(InstallPath, "run/sysprobe.sock")
+ // defaultEventMonitorAddress is the default unix socket path to be used for connecting to the event monitor
+ defaultEventMonitorAddress = filepath.Join(InstallPath, "run/event-monitor.sock")
+ // DefaultDDAgentBin the process agent's binary
+ DefaultDDAgentBin = filepath.Join(InstallPath, "bin/agent/agent")
+)
+
+const (
+ defaultConfdPath = "/etc/datadog-agent/conf.d"
+ defaultAdditionalChecksPath = "/etc/datadog-agent/checks.d"
+ defaultGuiPort = -1
+ // DefaultUpdaterLogFile is the default updater log file
+ DefaultUpdaterLogFile = "/var/log/datadog/updater.log"
+ // DefaultSecurityAgentLogFile points to the log file that will be used by the security-agent if not configured
+ DefaultSecurityAgentLogFile = "/var/log/datadog/security-agent.log"
+ // DefaultProcessAgentLogFile is the default process-agent log file
+ DefaultProcessAgentLogFile = "/var/log/datadog/process-agent.log"
+ // defaultSystemProbeLogFilePath is the default system-probe log file
+ defaultSystemProbeLogFilePath = "/var/log/datadog/system-probe.log"
+)
+
+// called by init in config.go, to ensure any os-specific config is done
+// in time
+func osinit() {
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config_windows.go
new file mode 100644
index 0000000000..695508c979
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/config_windows.go
@@ -0,0 +1,57 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package setup
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/DataDog/datadog-agent/pkg/util/executable"
+ "github.com/DataDog/datadog-agent/pkg/util/winutil"
+)
+
+var (
+ defaultConfdPath = "c:\\programdata\\datadog\\conf.d"
+ defaultAdditionalChecksPath = "c:\\programdata\\datadog\\checks.d"
+ defaultRunPath = "c:\\programdata\\datadog\\run"
+ defaultGuiPort = 5002
+ // DefaultUpdaterLogFile is the default updater log file
+ DefaultUpdaterLogFile = "c:\\programdata\\datadog\\logs\\updater.log"
+ // DefaultSecurityAgentLogFile points to the log file that will be used by the security-agent if not configured
+ DefaultSecurityAgentLogFile = "c:\\programdata\\datadog\\logs\\security-agent.log"
+ // DefaultProcessAgentLogFile is the default process-agent log file
+ DefaultProcessAgentLogFile = "C:\\ProgramData\\Datadog\\logs\\process-agent.log"
+
+ // defaultSystemProbeAddress is the default address to be used for connecting to the system probe
+ defaultSystemProbeAddress = "localhost:3333"
+ // defaultEventMonitorAddress is the default address to be used for connecting to the event monitor
+ defaultEventMonitorAddress = "localhost:3335"
+ defaultSystemProbeLogFilePath = "c:\\programdata\\datadog\\logs\\system-probe.log"
+ // DefaultDDAgentBin the process agent's binary
+ DefaultDDAgentBin = "c:\\Program Files\\Datadog\\Datadog Agent\\bin\\agent.exe"
+ // InstallPath is the default install path for the agent
+ InstallPath = "c:\\Program Files\\Datadog\\Datadog Agent"
+)
+
+func osinit() {
+ pd, err := winutil.GetProgramDataDir()
+ if err == nil {
+ defaultConfdPath = filepath.Join(pd, "conf.d")
+ defaultAdditionalChecksPath = filepath.Join(pd, "checks.d")
+ defaultRunPath = filepath.Join(pd, "run")
+ DefaultSecurityAgentLogFile = filepath.Join(pd, "logs", "security-agent.log")
+ defaultSystemProbeLogFilePath = filepath.Join(pd, "logs", "system-probe.log")
+ DefaultProcessAgentLogFile = filepath.Join(pd, "logs", "process-agent.log")
+ }
+
+ // Process Agent
+ if _here, err := executable.Folder(); err == nil {
+ agentFilePath := filepath.Join(_here, "..", "..", "embedded", "agent.exe")
+ if _, err := os.Stat(agentFilePath); err == nil {
+ DefaultDDAgentBin = agentFilePath
+ }
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/constants/constants.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/constants/constants.go
new file mode 100644
index 0000000000..d23620e464
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/constants/constants.go
@@ -0,0 +1,12 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package constants holds constants
+package constants
+
+const (
+ // DefaultEBPFLessProbeAddr defines the default ebpfless probe address
+ DefaultEBPFLessProbeAddr = "localhost:5678"
+)
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/ipc_address.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/ipc_address.go
new file mode 100644
index 0000000000..e6320fb17a
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/ipc_address.go
@@ -0,0 +1,38 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package setup
+
+import (
+ "fmt"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/system"
+)
+
+// GetIPCAddress returns the IPC address or an error if the address is not local
+func GetIPCAddress(config pkgconfigmodel.Reader) (string, error) {
+ var key string
+ // ipc_address is deprecated in favor of cmd_host, but we still need to support it:
+ // if it is set, use it; otherwise use cmd_host
+ if config.IsSet("ipc_address") {
+ log.Warn("ipc_address is deprecated, use cmd_host instead")
+ key = "ipc_address"
+ } else {
+ key = "cmd_host"
+ }
+
+ address, err := system.IsLocalAddress(config.GetString(key))
+ if err != nil {
+ return "", fmt.Errorf("%s: %s", key, err)
+ }
+ return address, nil
+}
+
+// GetIPCPort returns the IPC port
+func GetIPCPort() string {
+ return Datadog.GetString("cmd_port")
+}
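A small sketch of the key-precedence rule implemented by GetIPCAddress above, assuming a map-backed stand-in for pkgconfigmodel.Reader.IsSet; the ipcKey helper is illustrative only.

package main

import "fmt"

// ipcKey reproduces the selection rule from GetIPCAddress: the deprecated
// ipc_address key wins only when it is explicitly set, otherwise cmd_host is used.
func ipcKey(isSet func(string) bool) string {
	if isSet("ipc_address") {
		return "ipc_address"
	}
	return "cmd_host"
}

func main() {
	set := map[string]bool{"cmd_host": true}
	fmt.Println(ipcKey(func(k string) bool { return set[k] })) // cmd_host
}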
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/multi_region_failover.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/multi_region_failover.go
new file mode 100644
index 0000000000..2b1b0d5179
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/multi_region_failover.go
@@ -0,0 +1,31 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package setup
+
+import (
+ "time"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+func setupMultiRegionFailover(config pkgconfigmodel.Config) {
+ config.BindEnv("multi_region_failover.api_key")
+ config.BindEnv("multi_region_failover.site")
+ config.BindEnv("multi_region_failover.dd_url")
+ config.BindEnvAndSetDefault("multi_region_failover.enabled", false)
+ config.BindEnvAndSetDefault("multi_region_failover.failover_metrics", false)
+ config.BindEnvAndSetDefault("multi_region_failover.failover_logs", false)
+
+ config.BindEnv("multi_region_failover.remote_configuration.refresh_interval")
+ config.BindEnv("multi_region_failover.remote_configuration.max_backoff_time")
+ config.BindEnvAndSetDefault("multi_region_failover.remote_configuration.max_backoff_interval", 5*time.Minute)
+ config.BindEnv("multi_region_failover.remote_configuration.config_root")
+ config.BindEnv("multi_region_failover.remote_configuration.director_root")
+ config.BindEnv("multi_region_failover.remote_configuration.key")
+
+ config.BindEnvAndSetDefault("multi_region_failover.remote_configuration.clients.ttl_seconds", 30*time.Second)
+ config.BindEnvAndSetDefault("multi_region_failover.remote_configuration.clients.cache_bypass_limit", 5)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/otlp.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/otlp.go
new file mode 100644
index 0000000000..c42f7eb671
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/otlp.go
@@ -0,0 +1,98 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021-present Datadog, Inc.
+
+package setup
+
+import (
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+// OTLP configuration paths.
+const (
+ OTLPSection = "otlp_config"
+ OTLPTracesSubSectionKey = "traces"
+ OTLPTracePort = OTLPSection + "." + OTLPTracesSubSectionKey + ".internal_port"
+ OTLPTracesEnabled = OTLPSection + "." + OTLPTracesSubSectionKey + ".enabled"
+ OTLPLogsSubSectionKey = "logs"
+ OTLPLogsEnabled = OTLPSection + "." + OTLPLogsSubSectionKey + ".enabled"
+ OTLPReceiverSubSectionKey = "receiver"
+ OTLPReceiverSection = OTLPSection + "." + OTLPReceiverSubSectionKey
+ OTLPMetricsSubSectionKey = "metrics"
+ OTLPMetrics = OTLPSection + "." + OTLPMetricsSubSectionKey
+ OTLPMetricsEnabled = OTLPSection + "." + OTLPMetricsSubSectionKey + ".enabled"
+ OTLPTagCardinalityKey = OTLPMetrics + ".tag_cardinality"
+ OTLPDebugKey = "debug"
+ OTLPDebug = OTLPSection + "." + OTLPDebugKey
+)
+
+// OTLP related configuration.
+func OTLP(config pkgconfigmodel.Config) {
+ config.BindEnvAndSetDefault(OTLPTracePort, 5003)
+ config.BindEnvAndSetDefault(OTLPMetricsEnabled, true)
+ config.BindEnvAndSetDefault(OTLPTracesEnabled, true)
+ config.BindEnvAndSetDefault(OTLPLogsEnabled, false)
+
+ // NOTE: This only partially works.
+ // The environment variable is also manually checked in comp/otelcol/otlp/config.go
+ config.BindEnvAndSetDefault(OTLPTagCardinalityKey, "low", "DD_OTLP_TAG_CARDINALITY")
+
+ config.SetKnown(OTLPMetrics)
+ // Set all subkeys of otlp_config.metrics as known
+ config.SetKnown(OTLPMetrics + ".*")
+ config.SetKnown(OTLPReceiverSection)
+ // Set all subkeys of otlp_config.receiver as known
+ config.SetKnown(OTLPReceiverSection + ".*")
+ config.SetKnown(OTLPDebug)
+ // Set all subkeys of otlp_config.debug as known
+ config.SetKnown(OTLPDebug + ".*")
+
+ // set environment variables for selected fields
+ setupOTLPEnvironmentVariables(config)
+}
+
+// setupOTLPEnvironmentVariables sets up the environment variables associated with different OTLP ingest settings:
+// If there are changes in the OTLP receiver configuration, they should be reflected here.
+//
+// We don't need to set the default value: it is dealt with at the unmarshaling level
+// since we get the configuration through GetStringMap
+//
+// We are missing TLS settings: since some of them need more work to work right they are not included here.
+func setupOTLPEnvironmentVariables(config pkgconfigmodel.Config) {
+ // gRPC settings
+ config.BindEnv(OTLPSection + ".receiver.protocols.grpc.endpoint")
+ config.BindEnv(OTLPSection + ".receiver.protocols.grpc.transport")
+ config.BindEnv(OTLPSection + ".receiver.protocols.grpc.max_recv_msg_size_mib")
+ config.BindEnv(OTLPSection + ".receiver.protocols.grpc.max_concurrent_streams")
+ config.BindEnv(OTLPSection + ".receiver.protocols.grpc.read_buffer_size")
+ config.BindEnv(OTLPSection + ".receiver.protocols.grpc.write_buffer_size")
+ config.BindEnv(OTLPSection + ".receiver.protocols.grpc.include_metadata")
+
+ // Traces settings
+ config.BindEnvAndSetDefault("otlp_config.traces.span_name_remappings", map[string]string{})
+ config.BindEnv("otlp_config.traces.span_name_as_resource_name")
+ config.BindEnvAndSetDefault("otlp_config.traces.probabilistic_sampler.sampling_percentage", 100.,
+ "DD_OTLP_CONFIG_TRACES_PROBABILISTIC_SAMPLER_SAMPLING_PERCENTAGE")
+
+ // HTTP settings
+ config.BindEnv(OTLPSection + ".receiver.protocols.http.endpoint")
+ config.BindEnv(OTLPSection + ".receiver.protocols.http.max_request_body_size")
+ config.BindEnv(OTLPSection + ".receiver.protocols.http.include_metadata")
+
+ // Metrics settings
+ config.BindEnv(OTLPSection + ".metrics.delta_ttl")
+ config.BindEnv(OTLPSection + ".metrics.resource_attributes_as_tags")
+ config.BindEnv(OTLPSection + ".metrics.instrumentation_library_metadata_as_tags")
+ config.BindEnv(OTLPSection + ".metrics.instrumentation_scope_metadata_as_tags")
+ config.BindEnv(OTLPSection + ".metrics.tag_cardinality")
+ config.BindEnv(OTLPSection + ".metrics.histograms.mode")
+ config.BindEnv(OTLPSection + ".metrics.histograms.send_count_sum_metrics")
+ config.BindEnv(OTLPSection + ".metrics.histograms.send_aggregation_metrics")
+ config.BindEnv(OTLPSection + ".metrics.sums.cumulative_monotonic_mode")
+ config.BindEnv(OTLPSection + ".metrics.summaries.mode")
+
+ // Debug settings
+ config.BindEnv(OTLPSection + ".debug.loglevel") // Deprecated
+ config.BindEnv(OTLPSection + ".debug.verbosity")
+}
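The OTLP keys above rely on the config layer's implicit environment variable naming when BindEnv is called without an explicit name: "DD_" prefix, key uppercased, dots replaced by underscores. A hedged sketch of that convention follows; envName is an illustrative helper, not the vendored implementation.

package main

import (
	"fmt"
	"strings"
)

// envName sketches the derived environment variable name for a config key.
func envName(key string) string {
	return "DD_" + strings.ReplaceAll(strings.ToUpper(key), ".", "_")
}

func main() {
	fmt.Println(envName("otlp_config.receiver.protocols.grpc.endpoint"))
	// DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT
}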
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/process.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/process.go
new file mode 100644
index 0000000000..2a10f62cb2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/process.go
@@ -0,0 +1,252 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package setup
+
+import (
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+const (
+ // DefaultGRPCConnectionTimeoutSecs sets the default value for timeout when connecting to the agent
+ DefaultGRPCConnectionTimeoutSecs = 60
+
+ // DefaultProcessQueueSize is the default maximum number of process-agent checks that can be buffered in memory if the forwarder can't consume them fast enough (e.g. due to network disruption)
+ // This can be fairly high as the input should get throttled by queue bytes first.
+ // Assuming we generate ~8 checks/minute (for process/network), this should allow buffering of ~30 minutes of data assuming it fits within the queue bytes memory budget
+ DefaultProcessQueueSize = 256
+
+ // DefaultProcessRTQueueSize is the default maximum number of process-agent realtime checks that can be buffered in memory
+ // We use a small queue size for realtime messages because they go stale very quickly, so we only keep the latest few payloads
+ DefaultProcessRTQueueSize = 5
+
+ // DefaultProcessQueueBytes is the default amount of process-agent check data (in bytes) that can be buffered in memory
+ // Allow buffering up to 60 megabytes of payload data in total
+ DefaultProcessQueueBytes = 60 * 1000 * 1000
+
+ // DefaultProcessMaxPerMessage is the default maximum number of processes or containers per message. Note: only change this if the defaults are causing issues.
+ DefaultProcessMaxPerMessage = 100
+
+ // ProcessMaxPerMessageLimit is the maximum allowed value for the number of processes or containers per message.
+ ProcessMaxPerMessageLimit = 10000
+
+ // DefaultProcessMaxMessageBytes is the default maximum size of a message containing processes or container data. Note: only change this if the defaults are causing issues.
+ DefaultProcessMaxMessageBytes = 1000000
+
+ // ProcessMaxMessageBytesLimit is the maximum allowed value for the maximum size of a message containing processes or container data.
+ ProcessMaxMessageBytesLimit = 4000000
+
+ // DefaultProcessExpVarPort is the default port used by the process-agent expvar server
+ DefaultProcessExpVarPort = 6062
+
+ // DefaultProcessCmdPort is the default port used by process-agent to run a runtime settings server
+ DefaultProcessCmdPort = 6162
+
+ // DefaultProcessEntityStreamPort is the default port used by the process-agent to expose Process Entities
+ DefaultProcessEntityStreamPort = 6262
+
+ // DefaultProcessEndpoint is the default endpoint for the process agent to send payloads to
+ DefaultProcessEndpoint = "https://process.datadoghq.com"
+
+ // DefaultProcessEventsEndpoint is the default endpoint for the process agent to send event payloads to
+ DefaultProcessEventsEndpoint = "https://process-events.datadoghq.com"
+
+ // DefaultProcessEventStoreMaxItems is the default maximum number of events that can be stored in the Event Store
+ DefaultProcessEventStoreMaxItems = 200
+
+ // DefaultProcessEventStoreMaxPendingPushes is the default number of pending push operations that can be handled by the Event Store
+ DefaultProcessEventStoreMaxPendingPushes = 10
+
+ // DefaultProcessEventStoreMaxPendingPulls is the default number of pending pull operations that can be handled by the Event Store
+ DefaultProcessEventStoreMaxPendingPulls = 10
+
+ // DefaultProcessEventStoreStatsInterval is the default frequency at which the event store sends stats about expired events, in seconds
+ DefaultProcessEventStoreStatsInterval = 20
+
+ // DefaultProcessEventsMinCheckInterval is the minimum interval allowed for the process_events check
+ DefaultProcessEventsMinCheckInterval = time.Second
+
+ // DefaultProcessEventsCheckInterval is the default interval used by the process_events check
+ DefaultProcessEventsCheckInterval = 10 * time.Second
+
+ // DefaultProcessDiscoveryHintFrequency is the default frequency, in number of check runs, at which we send a process discovery hint
+ DefaultProcessDiscoveryHintFrequency = 60
+)
+
+// setupProcesses is meant to be called multiple times for different configs, but overrides apply to all configs, so
+// we need to make sure it is only applied once
+var processesAddOverrideOnce sync.Once
+
+// procBindEnvAndSetDefault is a helper function that generates both "DD_PROCESS_CONFIG_" and "DD_PROCESS_AGENT_" prefixes from a key.
+// We need this helper function because the standard BindEnvAndSetDefault can only generate one prefix from a key.
+func procBindEnvAndSetDefault(config pkgconfigmodel.Config, key string, val interface{}) {
+ // Uppercase, replace "." with "_" and add "DD_" prefix to key so that we follow the same environment
+ // variable convention as the core agent.
+ processConfigKey := "DD_" + strings.Replace(strings.ToUpper(key), ".", "_", -1)
+ processAgentKey := strings.Replace(processConfigKey, "PROCESS_CONFIG", "PROCESS_AGENT", 1)
+
+ envs := []string{processConfigKey, processAgentKey}
+ config.BindEnvAndSetDefault(key, val, envs...)
+}
+
+// procBindEnv is a helper function that generates both "DD_PROCESS_CONFIG_" and "DD_PROCESS_AGENT_" prefixes from a key, but does not set a default.
+// We need this helper function because the standard BindEnv can only generate one prefix from a key.
+func procBindEnv(config pkgconfigmodel.Config, key string) {
+ processConfigKey := "DD_" + strings.Replace(strings.ToUpper(key), ".", "_", -1)
+ processAgentKey := strings.Replace(processConfigKey, "PROCESS_CONFIG", "PROCESS_AGENT", 1)
+
+ config.BindEnv(key, processConfigKey, processAgentKey)
+}
+
+func setupProcesses(config pkgconfigmodel.Config) {
+ // "process_config.enabled" is deprecated. We must still be able to detect if it is present, to know if we should use it
+ // or container_collection.enabled and process_collection.enabled.
+ procBindEnv(config, "process_config.enabled")
+ config.SetEnvKeyTransformer("process_config.enabled", func(val string) interface{} {
+ // DD_PROCESS_AGENT_ENABLED: true - Process + Container checks enabled
+ // false - No checks enabled
+ // (unset) - Defaults are used, only container check is enabled
+ if enabled, _ := strconv.ParseBool(val); enabled {
+ return "true"
+ }
+ return "disabled"
+ })
+ procBindEnvAndSetDefault(config, "process_config.container_collection.enabled", true)
+ procBindEnvAndSetDefault(config, "process_config.process_collection.enabled", false)
+
+ // This allows for the process check to run in the core agent but is for linux only
+ procBindEnvAndSetDefault(config, "process_config.run_in_core_agent.enabled", false)
+
+ config.BindEnv("process_config.process_dd_url",
+ "DD_PROCESS_CONFIG_PROCESS_DD_URL",
+ "DD_PROCESS_AGENT_PROCESS_DD_URL",
+ "DD_PROCESS_AGENT_URL",
+ "DD_PROCESS_CONFIG_URL",
+ )
+ procBindEnv(config, "process_config.events_dd_url")
+ config.SetKnown("process_config.dd_agent_env")
+ config.SetKnown("process_config.intervals.process_realtime")
+ procBindEnvAndSetDefault(config, "process_config.queue_size", DefaultProcessQueueSize)
+ procBindEnvAndSetDefault(config, "process_config.process_queue_bytes", DefaultProcessQueueBytes)
+ procBindEnvAndSetDefault(config, "process_config.rt_queue_size", DefaultProcessRTQueueSize)
+ procBindEnvAndSetDefault(config, "process_config.max_per_message", DefaultProcessMaxPerMessage)
+ procBindEnvAndSetDefault(config, "process_config.max_message_bytes", DefaultProcessMaxMessageBytes)
+ procBindEnvAndSetDefault(config, "process_config.cmd_port", DefaultProcessCmdPort)
+ config.SetKnown("process_config.intervals.process")
+ config.SetKnown("process_config.blacklist_patterns")
+ config.SetKnown("process_config.intervals.container")
+ config.SetKnown("process_config.intervals.container_realtime")
+ procBindEnvAndSetDefault(config, "process_config.dd_agent_bin", DefaultDDAgentBin)
+ config.BindEnv("process_config.custom_sensitive_words",
+ "DD_CUSTOM_SENSITIVE_WORDS",
+ "DD_PROCESS_CONFIG_CUSTOM_SENSITIVE_WORDS",
+ "DD_PROCESS_AGENT_CUSTOM_SENSITIVE_WORDS")
+ config.SetEnvKeyTransformer("process_config.custom_sensitive_words", func(val string) interface{} {
+ // historically we accept DD_CUSTOM_SENSITIVE_WORDS as "w1,w2,..." but Viper expects the user to set a list as ["w1","w2",...]
+ if strings.HasPrefix(val, "[") && strings.HasSuffix(val, "]") {
+ return val
+ }
+
+ return strings.Split(val, ",")
+ })
+ config.BindEnv("process_config.scrub_args",
+ "DD_SCRUB_ARGS",
+ "DD_PROCESS_CONFIG_SCRUB_ARGS",
+ "DD_PROCESS_AGENT_SCRUB_ARGS")
+ config.BindEnv("process_config.strip_proc_arguments",
+ "DD_STRIP_PROCESS_ARGS",
+ "DD_PROCESS_CONFIG_STRIP_PROC_ARGUMENTS",
+ "DD_PROCESS_AGENT_STRIP_PROC_ARGUMENTS")
+ // Use PDH API to collect performance counter data for process check on Windows
+ procBindEnvAndSetDefault(config, "process_config.windows.use_perf_counters", false)
+ config.BindEnvAndSetDefault("process_config.additional_endpoints", make(map[string][]string),
+ "DD_PROCESS_CONFIG_ADDITIONAL_ENDPOINTS",
+ "DD_PROCESS_AGENT_ADDITIONAL_ENDPOINTS",
+ "DD_PROCESS_ADDITIONAL_ENDPOINTS",
+ )
+ procBindEnvAndSetDefault(config, "process_config.events_additional_endpoints", make(map[string][]string))
+ config.SetKnown("process_config.intervals.connections")
+ procBindEnvAndSetDefault(config, "process_config.expvar_port", DefaultProcessExpVarPort)
+ procBindEnvAndSetDefault(config, "process_config.log_file", DefaultProcessAgentLogFile)
+ procBindEnvAndSetDefault(config, "process_config.internal_profiling.enabled", false)
+ procBindEnvAndSetDefault(config, "process_config.grpc_connection_timeout_secs", DefaultGRPCConnectionTimeoutSecs)
+ procBindEnvAndSetDefault(config, "process_config.remote_tagger", false)
+ procBindEnvAndSetDefault(config, "process_config.remote_workloadmeta", false) // This flag might change. It's still being tested.
+ procBindEnvAndSetDefault(config, "process_config.disable_realtime_checks", false)
+ procBindEnvAndSetDefault(config, "process_config.ignore_zombie_processes", false)
+
+ // Process Discovery Check
+ config.BindEnvAndSetDefault("process_config.process_discovery.enabled", true,
+ "DD_PROCESS_CONFIG_PROCESS_DISCOVERY_ENABLED",
+ "DD_PROCESS_AGENT_PROCESS_DISCOVERY_ENABLED",
+ "DD_PROCESS_CONFIG_DISCOVERY_ENABLED", // Also bind old environment variables
+ "DD_PROCESS_AGENT_DISCOVERY_ENABLED",
+ )
+ procBindEnvAndSetDefault(config, "process_config.process_discovery.interval", 4*time.Hour)
+
+ procBindEnvAndSetDefault(config, "process_config.process_discovery.hint_frequency", DefaultProcessDiscoveryHintFrequency)
+
+ procBindEnvAndSetDefault(config, "process_config.drop_check_payloads", []string{})
+
+ // Process Lifecycle Events
+ procBindEnvAndSetDefault(config, "process_config.event_collection.store.max_items", DefaultProcessEventStoreMaxItems)
+ procBindEnvAndSetDefault(config, "process_config.event_collection.store.max_pending_pushes", DefaultProcessEventStoreMaxPendingPushes)
+ procBindEnvAndSetDefault(config, "process_config.event_collection.store.max_pending_pulls", DefaultProcessEventStoreMaxPendingPulls)
+ procBindEnvAndSetDefault(config, "process_config.event_collection.store.stats_interval", DefaultProcessEventStoreStatsInterval)
+ procBindEnvAndSetDefault(config, "process_config.event_collection.enabled", false)
+ procBindEnvAndSetDefault(config, "process_config.event_collection.interval", DefaultProcessEventsCheckInterval)
+
+ procBindEnvAndSetDefault(config, "process_config.cache_lookupid", false)
+
+ procBindEnvAndSetDefault(config, "process_config.language_detection.grpc_port", DefaultProcessEntityStreamPort)
+
+ processesAddOverrideOnce.Do(func() {
+ pkgconfigmodel.AddOverrideFunc(loadProcessTransforms)
+ })
+}
+
+// loadProcessTransforms loads transforms associated with process config settings.
+func loadProcessTransforms(config pkgconfigmodel.Config) {
+ if config.IsSet("process_config.enabled") {
+ log.Info("process_config.enabled is deprecated, use process_config.container_collection.enabled " +
+ "and process_config.process_collection.enabled instead, " +
+ "see https://docs.datadoghq.com/infrastructure/process#installation for more information")
+ procConfigEnabled := strings.ToLower(config.GetString("process_config.enabled"))
+ if procConfigEnabled == "disabled" {
+ config.Set("process_config.process_collection.enabled", false, pkgconfigmodel.SourceAgentRuntime)
+ config.Set("process_config.container_collection.enabled", false, pkgconfigmodel.SourceAgentRuntime)
+ } else if enabled, _ := strconv.ParseBool(procConfigEnabled); enabled { // "true"
+ config.Set("process_config.process_collection.enabled", true, pkgconfigmodel.SourceAgentRuntime)
+ config.Set("process_config.container_collection.enabled", false, pkgconfigmodel.SourceAgentRuntime)
+ } else { // "false"
+ config.Set("process_config.process_collection.enabled", false, pkgconfigmodel.SourceAgentRuntime)
+ config.Set("process_config.container_collection.enabled", true, pkgconfigmodel.SourceAgentRuntime)
+ }
+ }
+}
+
+// GetProcessAPIAddressPort returns the API endpoint of the process agent
+func GetProcessAPIAddressPort(config pkgconfigmodel.Reader) (string, error) {
+ address, err := GetIPCAddress(config)
+ if err != nil {
+ return "", err
+ }
+
+ port := config.GetInt("process_config.cmd_port")
+ if port <= 0 {
+ log.Warnf("Invalid process_config.cmd_port -- %d, using default port %d", port, DefaultProcessCmdPort)
+ port = DefaultProcessCmdPort
+ }
+
+ addrPort := net.JoinHostPort(address, strconv.Itoa(port))
+ return addrPort, nil
+}
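A sketch mirroring the string transforms in procBindEnvAndSetDefault above, showing the two environment variables derived from a single process_config key; procEnvNames is an illustrative helper, not part of the vendored file.

package main

import (
	"fmt"
	"strings"
)

// procEnvNames derives both the DD_PROCESS_CONFIG_* and DD_PROCESS_AGENT_*
// variable names for one config key, matching the helper's transforms.
func procEnvNames(key string) (string, string) {
	cfgKey := "DD_" + strings.Replace(strings.ToUpper(key), ".", "_", -1)
	agentKey := strings.Replace(cfgKey, "PROCESS_CONFIG", "PROCESS_AGENT", 1)
	return cfgKey, agentKey
}

func main() {
	a, b := procEnvNames("process_config.queue_size")
	fmt.Println(a) // DD_PROCESS_CONFIG_QUEUE_SIZE
	fmt.Println(b) // DD_PROCESS_AGENT_QUEUE_SIZE
}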
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/standard_names.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/standard_names.go
new file mode 100644
index 0000000000..62176ffc60
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/standard_names.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package setup
+
+// StandardJMXIntegrations is the list of standard jmx integrations.
+// This list is used by the Agent to determine if an integration is JMXFetch-based,
+// based only on the integration name.
+// DEPRECATED: this list is only used for backward compatibility with older JMXFetch integration
+// configs. All JMXFetch integrations should instead define `is_jmx: true` at the init_config or
+// instance level.
+var StandardJMXIntegrations = map[string]struct{}{
+ "activemq": {},
+ "activemq_58": {},
+ "cassandra": {},
+ "jmx": {},
+ "presto": {},
+ "solr": {},
+ "tomcat": {},
+ "kafka": {},
+}
+
+// StandardStatsdPrefixes is a list of the statsd prefixes used by the agent and its components
+var StandardStatsdPrefixes = []string{
+ "datadog.agent",
+ "datadog.dogstatsd",
+ "datadog.process",
+ "datadog.trace_agent",
+ "datadog.tracer",
+
+ "activemq",
+ "activemq_58",
+ "airflow",
+ "cassandra",
+ "confluent",
+ "hazelcast",
+ "hive",
+ "ignite",
+ "jboss",
+ "jvm",
+ "kafka",
+ "presto",
+ "sidekiq",
+ "solr",
+ "tomcat",
+
+ "runtime",
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe.go
new file mode 100644
index 0000000000..3293bc863f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe.go
@@ -0,0 +1,407 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package setup
+
+import (
+ "encoding/json"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+type transformerFunction func(string) interface{}
+
+const (
+ spNS = "system_probe_config"
+ netNS = "network_config"
+ smNS = "service_monitoring_config"
+ evNS = "event_monitoring_config"
+ smjtNS = smNS + ".tls.java"
+ diNS = "dynamic_instrumentation"
+ wcdNS = "windows_crash_detection"
+ pngNS = "ping"
+ tracerouteNS = "traceroute"
+ defaultConnsMessageBatchSize = 600
+
+ // defaultServiceMonitoringJavaAgentArgs is the default set of arguments passed to the injected java USM agent
+ defaultServiceMonitoringJavaAgentArgs = "dd.appsec.enabled=false,dd.trace.enabled=false,dd.usm.enabled=true"
+
+ // defaultRuntimeCompilerOutputDir is the default path for output from the system-probe runtime compiler
+ defaultRuntimeCompilerOutputDir = "/var/tmp/datadog-agent/system-probe/build"
+
+ // defaultKernelHeadersDownloadDir is the default path for downloading kernel headers for runtime compilation
+ defaultKernelHeadersDownloadDir = "/var/tmp/datadog-agent/system-probe/kernel-headers"
+
+ // defaultAptConfigDirSuffix is the default path under `/etc` to the apt config directory
+ defaultAptConfigDirSuffix = "/apt"
+
+ // defaultYumReposDirSuffix is the default path under `/etc` to the yum repository directory
+ defaultYumReposDirSuffix = "/yum.repos.d"
+
+ // defaultZypperReposDirSuffix is the default path under `/etc` to the zypper repository directory
+ defaultZypperReposDirSuffix = "/zypp/repos.d"
+
+ defaultOffsetThreshold = 400
+)
+
+var (
+ // defaultSystemProbeBPFDir is the default path for eBPF programs
+ defaultSystemProbeBPFDir = filepath.Join(InstallPath, "embedded/share/system-probe/ebpf")
+
+ // defaultSystemProbeJavaDir is the default path for java agent program
+ defaultSystemProbeJavaDir = filepath.Join(InstallPath, "embedded/share/system-probe/java")
+)
+
+// InitSystemProbeConfig declares all the configuration values normally read from system-probe.yaml.
+func InitSystemProbeConfig(cfg pkgconfigmodel.Config) {
+ cfg.BindEnvAndSetDefault("ignore_host_etc", false)
+ cfg.BindEnvAndSetDefault("go_core_dump", false)
+
+ // SBOM configuration
+ cfg.BindEnvAndSetDefault("sbom.host.enabled", false)
+ cfg.BindEnvAndSetDefault("sbom.host.analyzers", []string{"os"})
+ cfg.BindEnvAndSetDefault("sbom.cache_directory", filepath.Join(defaultRunPath, "sbom-sysprobe"))
+ cfg.BindEnvAndSetDefault("sbom.clear_cache_on_exit", false)
+ cfg.BindEnvAndSetDefault("sbom.cache.max_disk_size", 1000*1000*100) // used by custom cache: max disk space used by cached objects. Not equal to max disk usage
+ cfg.BindEnvAndSetDefault("sbom.cache.clean_interval", "30m") // used by custom cache.
+ cfg.BindEnvAndSetDefault("sbom.scan_queue.base_backoff", "5m")
+ cfg.BindEnvAndSetDefault("sbom.scan_queue.max_backoff", "1h")
+
+ // Auto exit configuration
+ cfg.BindEnvAndSetDefault("auto_exit.validation_period", 60)
+ cfg.BindEnvAndSetDefault("auto_exit.noprocess.enabled", false)
+ cfg.BindEnvAndSetDefault("auto_exit.noprocess.excluded_processes", []string{})
+
+ // statsd
+ cfg.BindEnv("bind_host")
+ cfg.BindEnvAndSetDefault("dogstatsd_port", 8125)
+
+ // logging
+ cfg.SetKnown(join(spNS, "log_file"))
+ cfg.SetKnown(join(spNS, "log_level"))
+ cfg.BindEnvAndSetDefault("log_file", defaultSystemProbeLogFilePath)
+ cfg.BindEnvAndSetDefault("log_level", "info", "DD_LOG_LEVEL", "LOG_LEVEL")
+ cfg.BindEnvAndSetDefault("syslog_uri", "")
+ cfg.BindEnvAndSetDefault("syslog_rfc", false)
+ cfg.BindEnvAndSetDefault("log_to_syslog", false)
+ cfg.BindEnvAndSetDefault("log_to_console", true)
+ cfg.BindEnvAndSetDefault("log_format_json", false)
+ cfg.BindEnvAndSetDefault("log_file_max_size", "10Mb")
+ cfg.BindEnvAndSetDefault("log_file_max_rolls", 1)
+
+ // secrets backend
+ cfg.BindEnvAndSetDefault("secret_backend_command", "")
+ cfg.BindEnvAndSetDefault("secret_backend_arguments", []string{})
+ cfg.BindEnvAndSetDefault("secret_backend_output_max_size", 0)
+ cfg.BindEnvAndSetDefault("secret_backend_timeout", 0)
+ cfg.BindEnvAndSetDefault("secret_backend_command_allow_group_exec_perm", false)
+ cfg.BindEnvAndSetDefault("secret_backend_skip_checks", false)
+
+ // settings for system-probe in general
+ cfg.BindEnvAndSetDefault(join(spNS, "enabled"), false, "DD_SYSTEM_PROBE_ENABLED")
+ cfg.BindEnvAndSetDefault(join(spNS, "external"), false, "DD_SYSTEM_PROBE_EXTERNAL")
+ cfg.SetKnown(join(spNS, "adjusted"))
+
+ cfg.BindEnvAndSetDefault(join(spNS, "sysprobe_socket"), defaultSystemProbeAddress, "DD_SYSPROBE_SOCKET")
+ cfg.BindEnvAndSetDefault(join(spNS, "max_conns_per_message"), defaultConnsMessageBatchSize)
+
+ cfg.BindEnvAndSetDefault(join(spNS, "debug_port"), 0)
+ cfg.BindEnvAndSetDefault(join(spNS, "telemetry_enabled"), false, "DD_TELEMETRY_ENABLED")
+ cfg.BindEnvAndSetDefault(join(spNS, "health_port"), int64(0), "DD_SYSTEM_PROBE_HEALTH_PORT")
+
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.enabled"), false, "DD_SYSTEM_PROBE_INTERNAL_PROFILING_ENABLED")
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.site"), DefaultSite, "DD_SYSTEM_PROBE_INTERNAL_PROFILING_SITE", "DD_SITE")
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.profile_dd_url"), "", "DD_SYSTEM_PROBE_INTERNAL_PROFILING_DD_URL", "DD_APM_INTERNAL_PROFILING_DD_URL")
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.api_key"), "", "DD_SYSTEM_PROBE_INTERNAL_PROFILING_API_KEY", "DD_API_KEY")
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.env"), "", "DD_SYSTEM_PROBE_INTERNAL_PROFILING_ENV", "DD_ENV")
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.period"), 5*time.Minute, "DD_SYSTEM_PROBE_INTERNAL_PROFILING_PERIOD")
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.cpu_duration"), 1*time.Minute, "DD_SYSTEM_PROBE_INTERNAL_PROFILING_CPU_DURATION")
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.mutex_profile_fraction"), 0)
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.block_profile_rate"), 0)
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.enable_goroutine_stacktraces"), false)
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.enable_block_profiling"), false)
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.enable_mutex_profiling"), false)
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.delta_profiles"), true)
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.custom_attributes"), []string{"module", "rule_id"})
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.unix_socket"), "")
+ cfg.BindEnvAndSetDefault(join(spNS, "internal_profiling.extra_tags"), []string{})
+
+ cfg.BindEnvAndSetDefault(join(spNS, "memory_controller.enabled"), false)
+ cfg.BindEnvAndSetDefault(join(spNS, "memory_controller.hierarchy"), "v1")
+ cfg.BindEnvAndSetDefault(join(spNS, "memory_controller.pressure_levels"), map[string]string{})
+ cfg.BindEnvAndSetDefault(join(spNS, "memory_controller.thresholds"), map[string]string{})
+
+ // ebpf general settings
+ cfg.BindEnvAndSetDefault(join(spNS, "bpf_debug"), false, "DD_SYSTEM_PROBE_CONFIG_BPF_DEBUG", "BPF_DEBUG")
+ cfg.BindEnvAndSetDefault(join(spNS, "bpf_dir"), defaultSystemProbeBPFDir, "DD_SYSTEM_PROBE_BPF_DIR")
+ cfg.BindEnvAndSetDefault(join(spNS, "excluded_linux_versions"), []string{})
+ cfg.BindEnvAndSetDefault(join(spNS, "enable_tracepoints"), false)
+ cfg.BindEnvAndSetDefault(join(spNS, "enable_co_re"), true, "DD_ENABLE_CO_RE")
+ cfg.BindEnvAndSetDefault(join(spNS, "btf_path"), "", "DD_SYSTEM_PROBE_BTF_PATH")
+ cfg.BindEnv(join(spNS, "enable_runtime_compiler"), "DD_ENABLE_RUNTIME_COMPILER")
+ cfg.BindEnvAndSetDefault(join(spNS, "allow_precompiled_fallback"), true, "DD_ALLOW_PRECOMPILED_FALLBACK")
+ cfg.BindEnvAndSetDefault(join(spNS, "allow_runtime_compiled_fallback"), true, "DD_ALLOW_RUNTIME_COMPILED_FALLBACK")
+ cfg.BindEnvAndSetDefault(join(spNS, "runtime_compiler_output_dir"), defaultRuntimeCompilerOutputDir, "DD_RUNTIME_COMPILER_OUTPUT_DIR")
+ cfg.BindEnv(join(spNS, "enable_kernel_header_download"), "DD_ENABLE_KERNEL_HEADER_DOWNLOAD")
+ cfg.BindEnvAndSetDefault(join(spNS, "kernel_header_dirs"), []string{}, "DD_KERNEL_HEADER_DIRS")
+ cfg.BindEnvAndSetDefault(join(spNS, "kernel_header_download_dir"), defaultKernelHeadersDownloadDir, "DD_KERNEL_HEADER_DOWNLOAD_DIR")
+ cfg.BindEnvAndSetDefault(join(spNS, "apt_config_dir"), suffixHostEtc(defaultAptConfigDirSuffix), "DD_APT_CONFIG_DIR")
+ cfg.BindEnvAndSetDefault(join(spNS, "yum_repos_dir"), suffixHostEtc(defaultYumReposDirSuffix), "DD_YUM_REPOS_DIR")
+ cfg.BindEnvAndSetDefault(join(spNS, "zypper_repos_dir"), suffixHostEtc(defaultZypperReposDirSuffix), "DD_ZYPPER_REPOS_DIR")
+ cfg.BindEnvAndSetDefault(join(spNS, "attach_kprobes_with_kprobe_events_abi"), false, "DD_ATTACH_KPROBES_WITH_KPROBE_EVENTS_ABI")
+ cfg.BindEnvAndSetDefault(join(spNS, "ebpf_instrumentation", "enabled"), false, "DD_ENABLE_EBPF_INSTRUMENTATION")
+
+ // User Tracer
+ cfg.BindEnvAndSetDefault(join(diNS, "enabled"), false, "DD_DYNAMIC_INSTRUMENTATION_ENABLED")
+
+ // network_tracer settings
+ // we cannot use BindEnvAndSetDefault for network_config.enabled because we need to know if it was manually set.
+ cfg.BindEnv(join(netNS, "enabled"), "DD_SYSTEM_PROBE_NETWORK_ENABLED") //nolint:errcheck
+ cfg.BindEnvAndSetDefault(join(spNS, "disable_tcp"), false, "DD_DISABLE_TCP_TRACING")
+ cfg.BindEnvAndSetDefault(join(spNS, "disable_udp"), false, "DD_DISABLE_UDP_TRACING")
+ cfg.BindEnvAndSetDefault(join(spNS, "disable_ipv6"), false, "DD_DISABLE_IPV6_TRACING")
+
+ cfg.SetDefault(join(netNS, "collect_tcp_v4"), true)
+ cfg.SetDefault(join(netNS, "collect_tcp_v6"), true)
+ cfg.SetDefault(join(netNS, "collect_udp_v4"), true)
+ cfg.SetDefault(join(netNS, "collect_udp_v6"), true)
+
+ cfg.BindEnvAndSetDefault(join(spNS, "offset_guess_threshold"), int64(defaultOffsetThreshold))
+
+ cfg.BindEnvAndSetDefault(join(spNS, "max_tracked_connections"), 65536)
+ cfg.BindEnv(join(spNS, "max_closed_connections_buffered"))
+ cfg.BindEnvAndSetDefault(join(spNS, "closed_connection_flush_threshold"), 0)
+ cfg.BindEnvAndSetDefault(join(spNS, "closed_channel_size"), 500)
+ cfg.BindEnvAndSetDefault(join(spNS, "max_connection_state_buffered"), 75000)
+
+ cfg.BindEnvAndSetDefault(join(spNS, "disable_dns_inspection"), false, "DD_DISABLE_DNS_INSPECTION")
+ cfg.BindEnvAndSetDefault(join(spNS, "collect_dns_stats"), true, "DD_COLLECT_DNS_STATS")
+ cfg.BindEnvAndSetDefault(join(spNS, "collect_local_dns"), false, "DD_COLLECT_LOCAL_DNS")
+ cfg.BindEnvAndSetDefault(join(spNS, "collect_dns_domains"), true, "DD_COLLECT_DNS_DOMAINS")
+ cfg.BindEnvAndSetDefault(join(spNS, "max_dns_stats"), 20000)
+ cfg.BindEnvAndSetDefault(join(spNS, "dns_timeout_in_s"), 15)
+
+ cfg.BindEnvAndSetDefault(join(spNS, "enable_conntrack"), true)
+ cfg.BindEnvAndSetDefault(join(spNS, "conntrack_max_state_size"), 65536*2)
+ cfg.BindEnvAndSetDefault(join(spNS, "conntrack_rate_limit"), 500)
+ cfg.BindEnvAndSetDefault(join(spNS, "enable_conntrack_all_namespaces"), true, "DD_SYSTEM_PROBE_ENABLE_CONNTRACK_ALL_NAMESPACES")
+ cfg.BindEnvAndSetDefault(join(netNS, "enable_protocol_classification"), true, "DD_ENABLE_PROTOCOL_CLASSIFICATION")
+ cfg.BindEnvAndSetDefault(join(netNS, "enable_ringbuffers"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_RINGBUFFERS")
+ cfg.BindEnvAndSetDefault(join(netNS, "ignore_conntrack_init_failure"), false, "DD_SYSTEM_PROBE_NETWORK_IGNORE_CONNTRACK_INIT_FAILURE")
+ cfg.BindEnvAndSetDefault(join(netNS, "conntrack_init_timeout"), 10*time.Second)
+ cfg.BindEnvAndSetDefault(join(netNS, "allow_netlink_conntracker_fallback"), true)
+
+ cfg.BindEnvAndSetDefault(join(spNS, "source_excludes"), map[string][]string{})
+ cfg.BindEnvAndSetDefault(join(spNS, "dest_excludes"), map[string][]string{})
+
+ cfg.BindEnvAndSetDefault(join(spNS, "language_detection.enabled"), false)
+
+ cfg.SetKnown(join(spNS, "process_service_inference", "use_improved_algorithm"))
+
+ // For backward compatibility
+ cfg.BindEnv(join(smNS, "process_service_inference", "enabled"), "DD_SYSTEM_PROBE_PROCESS_SERVICE_INFERENCE_ENABLED")
+ cfg.BindEnv(join(spNS, "process_service_inference", "enabled"))
+
+ // For backward compatibility
+ cfg.BindEnv(join(smNS, "process_service_inference", "use_windows_service_name"), "DD_SYSTEM_PROBE_PROCESS_SERVICE_INFERENCE_USE_WINDOWS_SERVICE_NAME")
+ cfg.BindEnv(join(spNS, "process_service_inference", "use_windows_service_name"))
+
+ // network_config namespace only
+
+ // For backward compatibility
+ cfg.BindEnv(join(netNS, "enable_http_monitoring"), "DD_SYSTEM_PROBE_NETWORK_ENABLE_HTTP_MONITORING")
+ cfg.BindEnv(join(smNS, "enable_http_monitoring"))
+
+ // For backward compatibility
+ cfg.BindEnv(join(netNS, "enable_https_monitoring"), "DD_SYSTEM_PROBE_NETWORK_ENABLE_HTTPS_MONITORING")
+ cfg.BindEnv(join(smNS, "tls", "native", "enabled"))
+
+ // For backward compatibility
+ cfg.BindEnv(join(smNS, "enable_go_tls_support"))
+ cfg.BindEnv(join(smNS, "tls", "go", "enabled"))
+ cfg.BindEnvAndSetDefault(join(smNS, "tls", "go", "exclude_self"), true)
+
+ cfg.BindEnvAndSetDefault(join(smNS, "enable_http2_monitoring"), false)
+ cfg.BindEnvAndSetDefault(join(smNS, "enable_kafka_monitoring"), false)
+ cfg.BindEnvAndSetDefault(join(smNS, "tls", "istio", "enabled"), false)
+ cfg.BindEnv(join(smNS, "tls", "nodejs", "enabled"))
+ cfg.BindEnvAndSetDefault(join(smjtNS, "enabled"), false)
+ cfg.BindEnvAndSetDefault(join(smjtNS, "debug"), false)
+ cfg.BindEnvAndSetDefault(join(smjtNS, "args"), defaultServiceMonitoringJavaAgentArgs)
+ cfg.BindEnvAndSetDefault(join(smjtNS, "allow_regex"), "")
+ cfg.BindEnvAndSetDefault(join(smjtNS, "block_regex"), "")
+ cfg.BindEnvAndSetDefault(join(smjtNS, "dir"), defaultSystemProbeJavaDir)
+ cfg.BindEnvAndSetDefault(join(smNS, "enable_http_stats_by_status_code"), true)
+
+ cfg.BindEnvAndSetDefault(join(netNS, "enable_gateway_lookup"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_GATEWAY_LOOKUP")
+ // Default value (100000) is set in `adjustUSM` to avoid emitting a deprecation warning for the default value.
+ cfg.BindEnv(join(netNS, "max_http_stats_buffered"), "DD_SYSTEM_PROBE_NETWORK_MAX_HTTP_STATS_BUFFERED")
+ cfg.BindEnv(join(smNS, "max_http_stats_buffered"))
+ cfg.BindEnvAndSetDefault(join(smNS, "max_kafka_stats_buffered"), 100000)
+ cfg.BindEnv(join(smNS, "max_concurrent_requests"))
+ cfg.BindEnv(join(smNS, "enable_quantization"))
+ cfg.BindEnv(join(smNS, "enable_connection_rollup"))
+ cfg.BindEnv(join(smNS, "enable_ring_buffers"))
+
+ oldHTTPRules := join(netNS, "http_replace_rules")
+ newHTTPRules := join(smNS, "http_replace_rules")
+ cfg.BindEnv(newHTTPRules)
+ cfg.BindEnv(oldHTTPRules, "DD_SYSTEM_PROBE_NETWORK_HTTP_REPLACE_RULES")
+ httpRulesTransformer := func(key string) transformerFunction {
+ return func(in string) interface{} {
+ var out []map[string]string
+ if err := json.Unmarshal([]byte(in), &out); err != nil {
+ log.Warnf(`%q can not be parsed: %v`, key, err)
+ }
+ return out
+ }
+ }
+ cfg.SetEnvKeyTransformer(oldHTTPRules, httpRulesTransformer(oldHTTPRules))
+ cfg.SetEnvKeyTransformer(newHTTPRules, httpRulesTransformer(newHTTPRules))
+
+ // Default value (1024) is set in `adjustUSM` to avoid emitting a deprecation warning for the default value.
+ cfg.BindEnv(join(netNS, "max_tracked_http_connections"))
+ cfg.BindEnv(join(smNS, "max_tracked_http_connections"))
+ // Default value (512) is set in `adjustUSM` to avoid emitting a deprecation warning for the default value.
+ cfg.BindEnv(join(netNS, "http_notification_threshold"))
+ cfg.BindEnv(join(smNS, "http_notification_threshold"))
+ // Default value (512) is set in `adjustUSM` to avoid emitting a deprecation warning for the default value.
+ cfg.BindEnv(join(netNS, "http_max_request_fragment"))
+ cfg.BindEnv(join(smNS, "http_max_request_fragment"))
+
+ // list of DNS query types to be recorded
+ cfg.BindEnvAndSetDefault(join(netNS, "dns_recorded_query_types"), []string{})
+ // (temporary) enable submitting DNS stats by query type.
+ cfg.BindEnvAndSetDefault(join(netNS, "enable_dns_by_querytype"), false)
+ // connection aggregation with port rollups
+ cfg.BindEnvAndSetDefault(join(netNS, "enable_connection_rollup"), false)
+
+ // windows config
+ cfg.BindEnvAndSetDefault(join(spNS, "windows.enable_monotonic_count"), false)
+
+ // oom_kill module
+ cfg.BindEnvAndSetDefault(join(spNS, "enable_oom_kill"), false)
+
+ // tcp_queue_length module
+ cfg.BindEnvAndSetDefault(join(spNS, "enable_tcp_queue_length"), false)
+ // process module
+ // nested within system_probe_config to not conflict with process-agent's process_config
+ cfg.BindEnvAndSetDefault(join(spNS, "process_config.enabled"), false, "DD_SYSTEM_PROBE_PROCESS_ENABLED")
+ // ebpf module
+ cfg.BindEnvAndSetDefault(join("ebpf_check", "enabled"), false)
+ cfg.BindEnvAndSetDefault(join("ebpf_check", "kernel_bpf_stats"), false)
+
+ // settings for the entry count of the ebpfcheck
+ // control the size of the buffers used for the batch lookups of the ebpf maps
+ cfg.BindEnvAndSetDefault(join("ebpf_check", "entry_count", "max_keys_buffer_size_bytes"), 512*1024)
+ cfg.BindEnvAndSetDefault(join("ebpf_check", "entry_count", "max_values_buffer_size_bytes"), 1024*1024)
+ // How many times we can restart the entry count of a map before we give up if we get an iteration restart
+ // due to the map changing while we look it up
+ cfg.BindEnvAndSetDefault(join("ebpf_check", "entry_count", "max_restarts"), 3)
+ // How many entries we should keep track of in the entry count map to detect restarts in the
+ // single-item iteration
+ cfg.BindEnvAndSetDefault(join("ebpf_check", "entry_count", "entries_for_iteration_restart_detection"), 100)
+
+ // service monitoring
+ cfg.BindEnvAndSetDefault(join(smNS, "enabled"), false, "DD_SYSTEM_PROBE_SERVICE_MONITORING_ENABLED")
+
+ cfg.BindEnvAndSetDefault(join(smNS, "http2_dynamic_table_map_cleaner_interval_seconds"), 30)
+
+ // Default value (300) is set in `adjustUSM` to avoid emitting a deprecation warning for the default value.
+ cfg.BindEnv(join(spNS, "http_map_cleaner_interval_in_s"))
+ cfg.BindEnv(join(smNS, "http_map_cleaner_interval_in_s"))
+
+ // Default value (30) is set in `adjustUSM` to avoid emitting a deprecation warning for the default value.
+ cfg.BindEnv(join(spNS, "http_idle_connection_ttl_in_s"))
+ cfg.BindEnv(join(smNS, "http_idle_connection_ttl_in_s"))
+
+ // event monitoring
+ cfg.BindEnvAndSetDefault(join(evNS, "process", "enabled"), false, "DD_SYSTEM_PROBE_EVENT_MONITORING_PROCESS_ENABLED")
+ cfg.BindEnvAndSetDefault(join(evNS, "network_process", "enabled"), true, "DD_SYSTEM_PROBE_EVENT_MONITORING_NETWORK_PROCESS_ENABLED")
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "enable_all_probes"), false)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "enable_kernel_filters"), true)
+ eventMonitorBindEnv(cfg, join(evNS, "enable_approvers"))
+ eventMonitorBindEnv(cfg, join(evNS, "enable_discarders"))
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "flush_discarder_window"), 3)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "pid_cache_size"), 10000)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "events_stats.tags_cardinality"), "high")
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "custom_sensitive_words"), []string{})
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "erpc_dentry_resolution_enabled"), true)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "map_dentry_resolution_enabled"), true)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "dentry_cache_size"), 1024)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "remote_tagger"), true)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "runtime_monitor.enabled"), false)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.lazy_interface_prefixes"), []string{})
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.classifier_priority"), 10)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.classifier_handle"), 0)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "event_stream.use_ring_buffer"), true)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "event_stream.use_fentry"), false)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "event_stream.use_fentry_amd64"), false)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "event_stream.use_fentry_arm64"), false)
+ eventMonitorBindEnv(cfg, join(evNS, "event_stream.buffer_size"))
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "envs_with_value"), []string{"LD_PRELOAD", "LD_LIBRARY_PATH", "PATH", "HISTSIZE", "HISTFILESIZE", "GLIBC_TUNABLES"})
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "runtime_compilation.enabled"), false)
+ eventMonitorBindEnv(cfg, join(evNS, "runtime_compilation.compiled_constants_enabled"))
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.enabled"), true)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "events_stats.polling_interval"), 20)
+ eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "syscalls_monitor.enabled"), false)
+ cfg.BindEnvAndSetDefault(join(evNS, "socket"), defaultEventMonitorAddress)
+ cfg.BindEnvAndSetDefault(join(evNS, "event_server.burst"), 40)
+
+ // process event monitoring data limits for network tracer
+ eventMonitorBindEnv(cfg, join(evNS, "network_process", "max_processes_tracked"))
+
+ // enable/disable use of root net namespace
+ cfg.BindEnvAndSetDefault(join(netNS, "enable_root_netns"), true)
+
+ // Windows crash detection
+ cfg.BindEnvAndSetDefault(join(wcdNS, "enabled"), false)
+
+ // Ping
+ cfg.BindEnvAndSetDefault(join(pngNS, "enabled"), false)
+
+ // Traceroute
+ cfg.BindEnvAndSetDefault(join(tracerouteNS, "enabled"), false)
+
+ initCWSSystemProbeConfig(cfg)
+}
+
+func join(pieces ...string) string {
+ return strings.Join(pieces, ".")
+}
+
+func suffixHostEtc(suffix string) string {
+ if value, _ := os.LookupEnv("HOST_ETC"); value != "" {
+ return path.Join(value, suffix)
+ }
+ return path.Join("/etc", suffix)
+}
+
+// eventMonitorBindEnvAndSetDefault is a helper function that generates both "DD_RUNTIME_SECURITY_CONFIG_" and "DD_EVENT_MONITORING_CONFIG_"
+// prefixes from a key. We need this helper function because the standard BindEnvAndSetDefault can only generate one prefix, but we want to
+// support both for backwards compatibility.
+func eventMonitorBindEnvAndSetDefault(config pkgconfigmodel.Config, key string, val interface{}) {
+ // Uppercase, replace "." with "_" and add "DD_" prefix to key so that we follow the same environment
+ // variable convention as the core agent.
+ emConfigKey := "DD_" + strings.Replace(strings.ToUpper(key), ".", "_", -1)
+ runtimeSecKey := strings.Replace(emConfigKey, "EVENT_MONITORING_CONFIG", "RUNTIME_SECURITY_CONFIG", 1)
+
+ envs := []string{emConfigKey, runtimeSecKey}
+ config.BindEnvAndSetDefault(key, val, envs...)
+}
+
+// eventMonitorBindEnv is the same as eventMonitorBindEnvAndSetDefault, but without setting a default.
+func eventMonitorBindEnv(config pkgconfigmodel.Config, key string) {
+ emConfigKey := "DD_" + strings.Replace(strings.ToUpper(key), ".", "_", -1)
+ runtimeSecKey := strings.Replace(emConfigKey, "EVENT_MONITORING_CONFIG", "RUNTIME_SECURITY_CONFIG", 1)
+
+ config.BindEnv(key, emConfigKey, runtimeSecKey)
+}
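A sketch of the payload shape accepted by the http_replace_rules environment key transformer above: a JSON array of string-to-string objects. The "pattern" and "repl" field names are illustrative assumptions; the transformer itself only unmarshals into []map[string]string.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Example value such as might be set via DD_SYSTEM_PROBE_NETWORK_HTTP_REPLACE_RULES.
	in := `[{"pattern": "/users/\\d+", "repl": "/users/?"}]`
	var out []map[string]string
	if err := json.Unmarshal([]byte(in), &out); err != nil {
		fmt.Println("can not be parsed:", err)
		return
	}
	fmt.Println(out[0]["pattern"], "->", out[0]["repl"])
}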
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe_cws.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe_cws.go
new file mode 100644
index 0000000000..010384933c
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe_cws.go
@@ -0,0 +1,122 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package setup
+
+import (
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/config/setup/constants"
+)
+
+func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) {
+ // CWS - general config
+ // the following entries are platform specific
+ // - runtime_security_config.policies.dir
+ // - runtime_security_config.socket
+ platformCWSConfig(cfg)
+
+ // CWS - general config
+ cfg.BindEnvAndSetDefault("runtime_security_config.enabled", false)
+ cfg.BindEnv("runtime_security_config.fim_enabled")
+ cfg.BindEnvAndSetDefault("runtime_security_config.policies.watch_dir", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.enabled", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.per_rule_enabled", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.report_internal_policies", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.event_server.burst", 40)
+ cfg.BindEnvAndSetDefault("runtime_security_config.event_server.retention", "6s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.event_server.rate", 10)
+ cfg.BindEnvAndSetDefault("runtime_security_config.cookie_cache_size", 100)
+ cfg.BindEnvAndSetDefault("runtime_security_config.internal_monitoring.enabled", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.log_patterns", []string{})
+ cfg.BindEnvAndSetDefault("runtime_security_config.log_tags", []string{})
+ cfg.BindEnvAndSetDefault("runtime_security_config.self_test.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.self_test.send_report", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.remote_configuration.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.direct_send_from_system_probe", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.use_secruntime_track", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.compliance_module.enabled", false)
+
+ cfg.SetDefault("runtime_security_config.windows_filename_cache_max", 16384)
+ cfg.SetDefault("runtime_security_config.windows_registry_cache_max", 4096)
+
+ // CWS - activity dump
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.cleanup_period", "30s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.tags_resolution_period", "60s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.load_controller_period", "60s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.min_timeout", "10m")
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.max_dump_size", 1750)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.traced_cgroups_count", 5)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.traced_event_types", []string{"exec", "open", "dns"})
+ cfg.BindEnv("runtime_security_config.activity_dump.cgroup_dump_timeout") // deprecated in favor of dump_duration
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.dump_duration", "900s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.rate_limiter", 500)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.cgroup_wait_list_timeout", "4500s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.cgroup_differentiate_args", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.local_storage.max_dumps_count", 100)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.local_storage.output_directory", DefaultSecurityProfilesDir)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.local_storage.formats", []string{"profile"})
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.local_storage.compression", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.syscall_monitor.period", "60s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.max_dump_count_per_workload", 25)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.tag_rules.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.silent_workloads.delay", "10s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.silent_workloads.ticker", "10s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.workload_deny_list", []string{})
+ cfg.BindEnvAndSetDefault("runtime_security_config.activity_dump.auto_suppression.enabled", false)
+
+ // CWS - SBOM
+ cfg.BindEnvAndSetDefault("runtime_security_config.sbom.enabled", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.sbom.workloads_cache_size", 10)
+
+ // CWS - Security Profiles
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.max_image_tags", 20)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.dir", DefaultSecurityProfilesDir)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.watch_dir", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.cache_size", 10)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.max_count", 400)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.remote_configuration.enabled", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.dns_match_max_depth", 3)
+
+ // CWS - Auto suppression
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.auto_suppression.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.auto_suppression.event_types", []string{"exec", "dns"})
+
+ // CWS - Anomaly detection
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.event_types", []string{"exec"})
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.default_minimum_stable_period", "900s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.minimum_stable_period.exec", "900s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.minimum_stable_period.dns", "900s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.workload_warmup_period", "180s")
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.unstable_profile_time_threshold", "1h")
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.unstable_profile_size_threshold", 5000000)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.rate_limiter.period", "1m")
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_keys", 1000)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_events_allowed", 300)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.tag_rules.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.silent_rule_events.enabled", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.anomaly_detection.enabled", true)
+
+ // CWS - Hash algorithms
+ cfg.BindEnvAndSetDefault("runtime_security_config.hash_resolver.enabled", true)
+ cfg.BindEnvAndSetDefault("runtime_security_config.hash_resolver.event_types", []string{"exec", "open"})
+ cfg.BindEnvAndSetDefault("runtime_security_config.hash_resolver.max_file_size", (1<<20)*10) // 10 MB
+ cfg.BindEnvAndSetDefault("runtime_security_config.hash_resolver.max_hash_rate", 500)
+ cfg.BindEnvAndSetDefault("runtime_security_config.hash_resolver.max_hash_burst", 1000)
+ cfg.BindEnvAndSetDefault("runtime_security_config.hash_resolver.hash_algorithms", []string{"sha1", "sha256", "ssdeep"})
+ cfg.BindEnvAndSetDefault("runtime_security_config.hash_resolver.cache_size", 500)
+ cfg.BindEnvAndSetDefault("runtime_security_config.hash_resolver.replace", map[string]string{})
+
+ // CWS - UserSessions
+ cfg.BindEnvAndSetDefault("runtime_security_config.user_sessions.cache_size", 1024)
+
+ // CWS - eBPF-less
+ cfg.BindEnvAndSetDefault("runtime_security_config.ebpfless.enabled", false)
+ cfg.BindEnvAndSetDefault("runtime_security_config.ebpfless.socket", constants.DefaultEBPFLessProbeAddr)
+
+ // CWS enforcement capabilities
+ cfg.BindEnvAndSetDefault("runtime_security_config.enforcement.enabled", true)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe_cws_notwin.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe_cws_notwin.go
new file mode 100644
index 0000000000..b9408254c1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe_cws_notwin.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !windows
+
+package setup
+
+import (
+ "path/filepath"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+func platformCWSConfig(cfg pkgconfigmodel.Config) {
+ cfg.BindEnvAndSetDefault("runtime_security_config.policies.dir", DefaultRuntimePoliciesDir)
+ cfg.BindEnvAndSetDefault("runtime_security_config.socket", filepath.Join(InstallPath, "run/runtime-security.sock"))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe_cws_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe_cws_windows.go
new file mode 100644
index 0000000000..9a84b449d4
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/system_probe_cws_windows.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+package setup
+
+import (
+ "path/filepath"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/winutil"
+)
+
+func platformCWSConfig(cfg pkgconfigmodel.Config) {
+ programdata, err := winutil.GetProgramDataDir()
+ if err == nil {
+ cfg.BindEnvAndSetDefault("runtime_security_config.policies.dir", filepath.Join(programdata, "runtime-security.d"))
+ } else {
+ cfg.BindEnvAndSetDefault("runtime_security_config.policies.dir", "c:\\programdata\\datadog\\runtime-security.d")
+ }
+ cfg.BindEnvAndSetDefault("runtime_security_config.socket", "localhost:3334")
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/test_helpers.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/test_helpers.go
new file mode 100644
index 0000000000..9ed3db639b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/test_helpers.go
@@ -0,0 +1,34 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package setup
+
+import (
+ "bytes"
+ "log"
+ "strings"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+// Conf generates and returns a new configuration
+func Conf() pkgconfigmodel.Config {
+ conf := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))
+ InitConfig(conf)
+ return conf
+}
+
+// ConfFromYAML generates a configuration from the given yaml config
+func ConfFromYAML(yamlConfig string) pkgconfigmodel.Config {
+ conf := Conf()
+ conf.SetConfigType("yaml")
+ e := conf.ReadConfig(bytes.NewBuffer([]byte(yamlConfig)))
+ if e != nil {
+ log.Println(e)
+ }
+ return conf
+}
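The two helpers above only compile under the `test` build tag. A minimal sketch of how a downstream test might use them (the YAML key and expected value are illustrative):

//go:build test

package setup_test

import (
	"testing"

	pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
)

func TestSiteFromYAML(t *testing.T) {
	// ConfFromYAML builds a config with all InitConfig defaults applied,
	// then layers the provided YAML on top of them.
	cfg := pkgconfigsetup.ConfFromYAML("site: datadoghq.eu")
	if got := cfg.GetString("site"); got != "datadoghq.eu" {
		t.Fatalf("unexpected site: %q", got)
	}
}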
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/unexpectedunicodefinder.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/unexpectedunicodefinder.go
new file mode 100644
index 0000000000..63767f60ed
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/setup/unexpectedunicodefinder.go
@@ -0,0 +1,57 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022-present Datadog, Inc.
+
+package setup
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+// UnexpectedUnicodeCodepoint contains specifics about an occurrence of an unexpected unicode codepoint
+type UnexpectedUnicodeCodepoint struct {
+ codepoint rune
+ reason string
+ position int
+}
+
+// FindUnexpectedUnicode reports any _unexpected_ unicode codepoints
+// found in the given 'input' string
+// Unexpected here generally means invisible whitespace and control chars
+func FindUnexpectedUnicode(input string) []UnexpectedUnicodeCodepoint {
+ currentIndex := 0
+ str := input
+ results := make([]UnexpectedUnicodeCodepoint, 0)
+
+ for len(str) > 0 {
+ r, size := utf8.DecodeRuneInString(str)
+ reason := ""
+ switch {
+ case r == utf8.RuneError:
+ reason = "RuneError"
+ case r == ' ' || r == '\r' || r == '\n' || r == '\t':
+ // These are allowed whitespace
+ reason = ""
+ case unicode.IsSpace(r):
+ reason = "unsupported whitespace"
+ case unicode.Is(unicode.Bidi_Control, r):
+ reason = "Bidirectional control"
+ case unicode.Is(unicode.C, r):
+ reason = "Control/surrogate"
+ }
+
+ if reason != "" {
+ results = append(results, UnexpectedUnicodeCodepoint{
+ codepoint: r,
+ reason: reason,
+ position: currentIndex,
+ })
+ }
+
+ currentIndex += size
+ str = str[size:]
+ }
+ return results
+}
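An illustrative caller for the finder above; the input string is made up, and since the result fields are unexported an external caller can only inspect the slice length:

package main

import (
	"fmt"

	pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
)

func main() {
	// U+200B (zero-width space) is not in the allowed set (space, \r, \n, \t)
	// and falls into the Unicode "C" categories, so it gets reported.
	findings := pkgconfigsetup.FindUnexpectedUnicode("api_key: abc\u200bdef")
	fmt.Printf("found %d unexpected codepoint(s)\n", len(findings)) // found 1 unexpected codepoint(s)
}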
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/api_key.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/api_key.go
new file mode 100644
index 0000000000..b8bb4a1316
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/api_key.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package utils
+
+import (
+ "strings"
+)
+
+// SanitizeAPIKey strips newlines and other control characters from a given string.
+func SanitizeAPIKey(key string) string {
+ return strings.TrimSpace(key)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/endpoints.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/endpoints.go
new file mode 100644
index 0000000000..1f33d8d7a4
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/endpoints.go
@@ -0,0 +1,185 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package utils
+
+import (
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/version"
+)
+
+const (
+ // InfraURLPrefix is the default infra URL prefix for datadog
+ InfraURLPrefix = "https://app."
+)
+
+func getResolvedDDUrl(c pkgconfigmodel.Reader, urlKey string) string {
+ resolvedDDURL := c.GetString(urlKey)
+ if c.IsSet("site") {
+ log.Infof("'site' and '%s' are both set in config: setting main endpoint to '%s': \"%s\"", urlKey, urlKey, c.GetString(urlKey))
+ }
+ return resolvedDDURL
+}
+
+// mergeAdditionalEndpoints merges additional endpoints into keysPerDomain
+func mergeAdditionalEndpoints(keysPerDomain, additionalEndpoints map[string][]string) (map[string][]string, error) {
+ for domain, apiKeys := range additionalEndpoints {
+ // Validating domain
+ _, err := url.Parse(domain)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse url from 'additional_endpoints' %s: %s", domain, err)
+ }
+
+ if _, ok := keysPerDomain[domain]; ok {
+ keysPerDomain[domain] = append(keysPerDomain[domain], apiKeys...)
+ } else {
+ keysPerDomain[domain] = apiKeys
+ }
+ }
+
+ // dedupe api keys and remove domains with no api keys (or empty ones)
+ for domain, apiKeys := range keysPerDomain {
+ dedupedAPIKeys := make([]string, 0, len(apiKeys))
+ seen := make(map[string]bool)
+ for _, apiKey := range apiKeys {
+ trimmedAPIKey := strings.TrimSpace(apiKey)
+ if _, ok := seen[trimmedAPIKey]; !ok && trimmedAPIKey != "" {
+ seen[trimmedAPIKey] = true
+ dedupedAPIKeys = append(dedupedAPIKeys, trimmedAPIKey)
+ }
+ }
+
+ if len(dedupedAPIKeys) > 0 {
+ keysPerDomain[domain] = dedupedAPIKeys
+ } else {
+ log.Infof("No API key provided for domain \"%s\", removing domain from endpoints", domain)
+ delete(keysPerDomain, domain)
+ }
+ }
+
+ return keysPerDomain, nil
+}
+
+// GetMainEndpointBackwardCompatible implements the logic to extract the DD URL from a config, based on `site`, ddURLKey and a backward-compatible key
+func GetMainEndpointBackwardCompatible(c pkgconfigmodel.Reader, prefix string, ddURLKey string, backwardKey string) string {
+ if c.IsSet(ddURLKey) && c.GetString(ddURLKey) != "" {
+ // value under ddURLKey takes precedence over backwardKey and 'site'
+ return getResolvedDDUrl(c, ddURLKey)
+ } else if c.IsSet(backwardKey) && c.GetString(backwardKey) != "" {
+ // value under backwardKey takes precedence over 'site'
+ return getResolvedDDUrl(c, backwardKey)
+ } else if c.GetString("site") != "" {
+ return prefix + strings.TrimSpace(c.GetString("site"))
+ }
+ return prefix + pkgconfigsetup.DefaultSite
+}
+
+// GetMultipleEndpoints returns the api keys per domain specified in the main agent config
+func GetMultipleEndpoints(c pkgconfigmodel.Reader) (map[string][]string, error) {
+ ddURL := GetInfraEndpoint(c)
+ // Validating domain
+ if _, err := url.Parse(ddURL); err != nil {
+ return nil, fmt.Errorf("could not parse main endpoint: %s", err)
+ }
+
+ keysPerDomain := map[string][]string{
+ ddURL: {
+ c.GetString("api_key"),
+ },
+ }
+
+ additionalEndpoints := c.GetStringMapStringSlice("additional_endpoints")
+
+ // populate with MRF endpoints too
+ if c.GetBool("multi_region_failover.enabled") {
+ haURL, err := GetMRFInfraEndpoint(c)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse MRF endpoint: %s", err)
+ }
+ additionalEndpoints[haURL] = []string{c.GetString("multi_region_failover.api_key")}
+ }
+ return mergeAdditionalEndpoints(keysPerDomain, additionalEndpoints)
+}
+
+// BuildURLWithPrefix will return an HTTP(s) URL for a site given a certain prefix
+func BuildURLWithPrefix(prefix, site string) string {
+ return prefix + strings.TrimSpace(site)
+}
+
+// GetMainEndpoint returns the main DD URL defined in the config, based on `site` and the prefix, or ddURLKey
+func GetMainEndpoint(c pkgconfigmodel.Reader, prefix string, ddURLKey string) string {
+ // value under ddURLKey takes precedence over 'site'
+ if c.IsSet(ddURLKey) && c.GetString(ddURLKey) != "" {
+ return getResolvedDDUrl(c, ddURLKey)
+ } else if c.GetString("site") != "" {
+ return BuildURLWithPrefix(prefix, c.GetString("site"))
+ }
+ return BuildURLWithPrefix(prefix, pkgconfigsetup.DefaultSite)
+}
+
+// GetMRFEndpoint returns the MRF DD URL defined in the config, based on `multi_region_failover.site` and the prefix, or ddMRFURLKey
+func GetMRFEndpoint(c pkgconfigmodel.Reader, prefix, ddMRFURLKey string) (string, error) {
+ // value under ddURLKey takes precedence over 'multi_region_failover.site'
+ if c.IsSet(ddMRFURLKey) && c.GetString(ddMRFURLKey) != "" {
+ return getResolvedMRFDDURL(c, ddMRFURLKey), nil
+ } else if c.GetString("multi_region_failover.site") != "" {
+ return BuildURLWithPrefix(prefix, c.GetString("multi_region_failover.site")), nil
+ }
+ return "", fmt.Errorf("`multi_region_failover.site` or `multi_region_failover.dd_url` must be set when Multi-Region Failover is enabled")
+}
+
+func getResolvedMRFDDURL(c pkgconfigmodel.Reader, mrfURLKey string) string {
+ resolvedMRFDDURL := c.GetString(mrfURLKey)
+ if c.IsSet("multi_region_failover.site") {
+ log.Infof("'multi_region_failover.site' and '%s' are both set in config: setting main endpoint to '%s': \"%s\"", mrfURLKey, mrfURLKey, resolvedMRFDDURL)
+ }
+ return resolvedMRFDDURL
+}
+
+// GetInfraEndpoint returns the main DD Infra URL defined in config, based on the value of `site` and `dd_url`
+func GetInfraEndpoint(c pkgconfigmodel.Reader) string {
+ return GetMainEndpoint(c, InfraURLPrefix, "dd_url")
+}
+
+// GetMRFInfraEndpoint returns the MRF DD Infra URL defined in config, based on the value of `multi_region_failover.site` and `multi_region_failover.dd_url`
+func GetMRFInfraEndpoint(c pkgconfigmodel.Reader) (string, error) {
+ return GetMRFEndpoint(c, InfraURLPrefix, "multi_region_failover.dd_url")
+}
+
+// ddURLRegexp determines whether a URL belongs to Datadog. If it does, the URL is prefixed with the Agent
+// version (see AddAgentVersionToDomain).
+var ddURLRegexp = regexp.MustCompile(`^app(\.[a-z]{2}\d)?\.(datad(oghq|0g)\.(com|eu)|ddog-gov\.com)$`)
+
+// getDomainPrefix provides the right prefix for agent X.Y.Z
+func getDomainPrefix(app string) string {
+ v, _ := version.Agent()
+ return fmt.Sprintf("%d-%d-%d-%s.agent", v.Major, v.Minor, v.Patch, app)
+}
+
+// AddAgentVersionToDomain prefixes the domain with the agent version: X-Y-Z.domain
+func AddAgentVersionToDomain(DDURL string, app string) (string, error) {
+ u, err := url.Parse(DDURL)
+ if err != nil {
+ return "", err
+ }
+
+ // we don't update unknown URLs (ie: proxy or custom DD domain)
+ if !ddURLRegexp.MatchString(u.Host) {
+ return DDURL, nil
+ }
+
+ subdomain := strings.Split(u.Host, ".")[0]
+ newSubdomain := getDomainPrefix(app)
+
+ u.Host = strings.Replace(u.Host, subdomain, newSubdomain, 1)
+ return u.String(), nil
+}
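A hedged sketch of how AddAgentVersionToDomain behaves for a recognized Datadog host versus a custom one; the exact prefix depends on the agent version compiled into the binary:

package main

import (
	"fmt"

	configutils "github.com/DataDog/datadog-agent/pkg/config/utils"
)

func main() {
	// Hosts matching ddURLRegexp get the agent version folded into the
	// subdomain, e.g. app.datadoghq.com -> 7-52-0-app.agent.datadoghq.com.
	prefixed, err := configutils.AddAgentVersionToDomain("https://app.datadoghq.com", "app")
	if err != nil {
		panic(err)
	}
	fmt.Println(prefixed)

	// Proxies and custom domains are returned unchanged.
	unchanged, _ := configutils.AddAgentVersionToDomain("https://proxy.example.com", "app")
	fmt.Println(unchanged)
}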
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/metadata_providers.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/metadata_providers.go
new file mode 100644
index 0000000000..ca2d3b01e4
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/metadata_providers.go
@@ -0,0 +1,24 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package utils
+
+import (
+ "time"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+// MetadataProviders helps unmarshalling `metadata_providers` config param
+type MetadataProviders struct {
+ Name string `mapstructure:"name"`
+ Interval time.Duration `mapstructure:"interval"`
+}
+
+// GetMetadataProviders returns the "metadata_providers" set in the configuration
+func GetMetadataProviders(c pkgconfigmodel.Reader) ([]MetadataProviders, error) {
+ var mp []MetadataProviders
+ return mp, c.UnmarshalKey("metadata_providers", &mp)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/miscellaneous.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/miscellaneous.go
new file mode 100644
index 0000000000..f1ff435c0a
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/miscellaneous.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package utils offers a number of high level helpers to work with the configuration
+package utils
+
+import (
+ "path/filepath"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+// ConfFileDirectory returns the absolute path to the folder containing the config
+// file used to populate the registry
+func ConfFileDirectory(c pkgconfigmodel.Reader) string {
+ return filepath.Dir(c.ConfigFileUsed())
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/tags.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/tags.go
new file mode 100644
index 0000000000..5a5d3afa16
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/tags.go
@@ -0,0 +1,30 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package utils
+
+import (
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+// GetConfiguredTags returns the list of tags from a configuration, based on
+// `tags` (DD_TAGS) and `extra_tags` (DD_EXTRA_TAGS), plus `dogstatsd_tags` (DD_DOGSTATSD_TAGS)
+// if includeDogstatsd is true.
+func GetConfiguredTags(c pkgconfigmodel.Reader, includeDogstatsd bool) []string {
+ tags := c.GetStringSlice("tags")
+ extraTags := c.GetStringSlice("extra_tags")
+
+ var dsdTags []string
+ if includeDogstatsd {
+ dsdTags = c.GetStringSlice("dogstatsd_tags")
+ }
+
+ combined := make([]string, 0, len(tags)+len(extraTags)+len(dsdTags))
+ combined = append(combined, tags...)
+ combined = append(combined, extraTags...)
+ combined = append(combined, dsdTags...)
+
+ return combined
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/telemetry.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/telemetry.go
new file mode 100644
index 0000000000..61ee1393d3
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/telemetry.go
@@ -0,0 +1,47 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package utils
+
+import (
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+// IsCheckTelemetryEnabled returns whether telemetry is enabled for the given check.
+// Returns true if a * is present in the telemetry.checks list.
+func IsCheckTelemetryEnabled(checkName string, cfg pkgconfigmodel.Reader) bool {
+ // when agent_telemetry is enabled, we enable telemetry for every check
+ if isAgentTelemetryEnabled(cfg) {
+ return true
+ }
+
+ // false if telemetry is disabled
+ if !IsTelemetryEnabled(cfg) {
+ return false
+ }
+
+ // by default, we don't enable telemetry for every check's stats
+ if cfg.IsSet("telemetry.checks") {
+ for _, check := range cfg.GetStringSlice("telemetry.checks") {
+ if check == "*" {
+ return true
+ } else if check == checkName {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IsTelemetryEnabled returns whether or not telemetry is enabled
+func IsTelemetryEnabled(cfg pkgconfigmodel.Reader) bool {
+ return cfg.IsSet("telemetry.enabled") && cfg.GetBool("telemetry.enabled") ||
+ (isAgentTelemetryEnabled(cfg))
+}
+
+// isAgentTelemetryEnabled returns whether or not agent telemetry is enabled
+func isAgentTelemetryEnabled(cfg pkgconfigmodel.Reader) bool {
+ return cfg.IsSet("agent_telemetry.enabled") && cfg.GetBool("agent_telemetry.enabled")
+}
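A sketch of the explicit-check versus wildcard behaviour, building a throwaway config the same way the test helpers earlier in this change do; the YAML is illustrative:

package main

import (
	"bytes"
	"fmt"
	"strings"

	pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
	configutils "github.com/DataDog/datadog-agent/pkg/config/utils"
)

func main() {
	cfg := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))
	cfg.SetConfigType("yaml")
	_ = cfg.ReadConfig(bytes.NewBufferString("telemetry:\n  enabled: true\n  checks:\n    - cpu\n"))

	fmt.Println(configutils.IsCheckTelemetryEnabled("cpu", cfg))    // true: listed explicitly
	fmt.Println(configutils.IsCheckTelemetryEnabled("memory", cfg)) // false: not listed and no "*"
}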
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/trace.go
new file mode 100644
index 0000000000..a1103bf505
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/config/utils/trace.go
@@ -0,0 +1,35 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package utils
+
+import (
+ "strings"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// GetTraceAgentDefaultEnv returns the default env for the trace agent
+func GetTraceAgentDefaultEnv(c pkgconfigmodel.Reader) string {
+ defaultEnv := ""
+ if c.IsSet("apm_config.env") {
+ defaultEnv = c.GetString("apm_config.env")
+ log.Debugf("Setting DefaultEnv to %q (from apm_config.env)", defaultEnv)
+ } else if c.IsSet("env") {
+ defaultEnv = c.GetString("env")
+ log.Debugf("Setting DefaultEnv to %q (from 'env' config option)", defaultEnv)
+ } else {
+ for _, tag := range GetConfiguredTags(c, false) {
+ if strings.HasPrefix(tag, "env:") {
+ defaultEnv = strings.TrimPrefix(tag, "env:")
+ log.Debugf("Setting DefaultEnv to %q (from `env:` entry under the 'tags' config option: %q)", defaultEnv, tag)
+ return defaultEnv
+ }
+ }
+ }
+
+ return defaultEnv
+}
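The fallback chain above (apm_config.env, then env, then an env: entry under tags) can be exercised with the same throwaway-config approach; the values here are illustrative:

package main

import (
	"bytes"
	"fmt"
	"strings"

	pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
	configutils "github.com/DataDog/datadog-agent/pkg/config/utils"
)

func main() {
	cfg := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_"))
	cfg.SetConfigType("yaml")
	// Neither apm_config.env nor env is set, so the env: tag is the fallback.
	_ = cfg.ReadConfig(bytes.NewBufferString("tags:\n  - env:prod\n  - team:core\n"))

	fmt.Println(configutils.GetTraceAgentDefaultEnv(cfg)) // prod
}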
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/api_v0.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/api_v0.go
new file mode 100644
index 0000000000..ed20322d24
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/api_v0.go
@@ -0,0 +1,47 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package auditor
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "time"
+)
+
+// v0: In the first version of the auditor, we were only recording file offsets
+
+type registryEntryV0 struct {
+ Path string
+ Timestamp time.Time
+ Offset int64
+}
+
+type jsonRegistryV0 struct {
+ Version int
+ Registry map[string]registryEntryV0
+}
+
+func unmarshalRegistryV0(b []byte) (map[string]*RegistryEntry, error) {
+ var r jsonRegistryV0
+ err := json.Unmarshal(b, &r)
+ if err != nil {
+ return nil, err
+ }
+ registry := make(map[string]*RegistryEntry)
+ for identifier, entry := range r.Registry {
+ switch {
+ case entry.Offset > 0:
+ // from v0 to v1 and further, we also prefixed path with file:
+ newIdentifier := fmt.Sprintf("file:%s", identifier)
+ registry[newIdentifier] = &RegistryEntry{LastUpdated: entry.Timestamp, Offset: strconv.FormatInt(entry.Offset, 10)}
+ default:
+ // no valid offset for this entry
+ }
+ }
+ return registry, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/api_v1.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/api_v1.go
new file mode 100644
index 0000000000..fe2baa7008
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/api_v1.go
@@ -0,0 +1,45 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package auditor
+
+import (
+ "encoding/json"
+ "strconv"
+ "time"
+)
+
+// v1: In the second version of the auditor, the Timestamp field was renamed LastUpdated and a new string Timestamp field was added to record container offsets.
+
+type registryEntryV1 struct {
+ Timestamp string
+ Offset int64
+ LastUpdated time.Time
+}
+
+type jsonRegistryV1 struct {
+ Version int
+ Registry map[string]registryEntryV1
+}
+
+func unmarshalRegistryV1(b []byte) (map[string]*RegistryEntry, error) {
+ var r jsonRegistryV1
+ err := json.Unmarshal(b, &r)
+ if err != nil {
+ return nil, err
+ }
+ registry := make(map[string]*RegistryEntry)
+ for identifier, entry := range r.Registry {
+ switch {
+ case entry.Offset > 0:
+ registry[identifier] = &RegistryEntry{LastUpdated: entry.LastUpdated, Offset: strconv.FormatInt(entry.Offset, 10)}
+ case entry.Timestamp != "":
+ registry[identifier] = &RegistryEntry{LastUpdated: entry.LastUpdated, Offset: entry.Timestamp}
+ default:
+ // no valid offset for this entry
+ }
+ }
+ return registry, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/api_v2.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/api_v2.go
new file mode 100644
index 0000000000..3bcf03f09d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/api_v2.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package auditor
+
+import (
+ "encoding/json"
+)
+
+// v2: In the third version of the auditor, we dropped Timestamp and used a generic Offset instead to reinforce the separation of concerns
+// between the auditor and log sources.
+
+func unmarshalRegistryV2(b []byte) (map[string]*RegistryEntry, error) {
+ var r JSONRegistry
+ err := json.Unmarshal(b, &r)
+ if err != nil {
+ return nil, err
+ }
+ registry := make(map[string]*RegistryEntry)
+ for identifier, entry := range r.Registry {
+ newEntry := entry
+ registry[identifier] = &newEntry
+ }
+ return registry, nil
+}
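For orientation, a sketch of the version-2 registry document that unmarshalRegistryV2 consumes; it reuses the exported JSONRegistry and RegistryEntry types from auditor.go, and the identifier and offset values are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/DataDog/datadog-agent/pkg/logs/auditor"
)

func main() {
	reg := auditor.JSONRegistry{
		Version: 2,
		Registry: map[string]auditor.RegistryEntry{
			// File identifiers carry the "file:" prefix introduced when migrating from v0.
			"file:/var/log/app.log": {
				LastUpdated: time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
				Offset:      "93180",
				TailingMode: "end",
			},
		},
	}
	out, _ := json.MarshalIndent(reg, "", "  ")
	fmt.Println(string(out))
}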
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/auditor.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/auditor.go
new file mode 100644
index 0000000000..578107bfa0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/auditor.go
@@ -0,0 +1,332 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package auditor
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/status/health"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+// DefaultRegistryFilename is the default registry filename
+const DefaultRegistryFilename = "registry.json"
+
+const defaultFlushPeriod = 1 * time.Second
+const defaultCleanupPeriod = 300 * time.Second
+
+// latest version of the API used by the auditor to retrieve the registry from disk.
+const registryAPIVersion = 2
+
+// Registry holds a list of offsets.
+type Registry interface {
+ GetOffset(identifier string) string
+ GetTailingMode(identifier string) string
+}
+
+// A RegistryEntry represents an entry in the registry where we keep track
+// of current offsets
+type RegistryEntry struct {
+ LastUpdated time.Time
+ Offset string
+ TailingMode string
+ IngestionTimestamp int64
+}
+
+// JSONRegistry represents the registry that will be written on disk
+type JSONRegistry struct {
+ Version int
+ Registry map[string]RegistryEntry
+}
+
+// An Auditor handles messages successfully submitted to the intake
+type Auditor interface {
+ Registry
+ Start()
+ Stop()
+ // Channel returns the channel to which successful payloads should be sent.
+ Channel() chan *message.Payload
+}
+
+// A RegistryAuditor is storing the Auditor information using a registry.
+type RegistryAuditor struct {
+ health *health.Handle
+ chansMutex sync.Mutex
+ inputChan chan *message.Payload
+ registry map[string]*RegistryEntry
+ registryPath string
+ registryDirPath string
+ registryTmpFile string
+ registryMutex sync.Mutex
+ entryTTL time.Duration
+ done chan struct{}
+}
+
+// New returns an initialized Auditor
+func New(runPath string, filename string, ttl time.Duration, health *health.Handle) *RegistryAuditor {
+ return &RegistryAuditor{
+ health: health,
+ registryPath: filepath.Join(runPath, filename),
+ registryDirPath: runPath,
+ registryTmpFile: filepath.Base(filename) + ".tmp",
+ entryTTL: ttl,
+ }
+}
+
+// Start starts the Auditor
+func (a *RegistryAuditor) Start() {
+ a.createChannels()
+ a.registry = a.recoverRegistry()
+ a.cleanupRegistry()
+ go a.run()
+}
+
+// Stop stops the Auditor
+func (a *RegistryAuditor) Stop() {
+ a.closeChannels()
+ a.cleanupRegistry()
+ if err := a.flushRegistry(); err != nil {
+ log.Warn(err)
+ }
+}
+
+func (a *RegistryAuditor) createChannels() {
+ a.chansMutex.Lock()
+ defer a.chansMutex.Unlock()
+ a.inputChan = make(chan *message.Payload, config.ChanSize)
+ a.done = make(chan struct{})
+}
+
+func (a *RegistryAuditor) closeChannels() {
+ a.chansMutex.Lock()
+ defer a.chansMutex.Unlock()
+ if a.inputChan != nil {
+ close(a.inputChan)
+ }
+
+ if a.done != nil {
+ <-a.done
+ a.done = nil
+ }
+ a.inputChan = nil
+}
+
+// Channel returns the channel to use to communicate with the auditor or nil
+// if the auditor is currently stopped.
+func (a *RegistryAuditor) Channel() chan *message.Payload {
+ a.chansMutex.Lock()
+ defer a.chansMutex.Unlock()
+ return a.inputChan
+}
+
+// GetOffset returns the last committed offset for a given identifier,
+// returns an empty string if it does not exist.
+func (a *RegistryAuditor) GetOffset(identifier string) string {
+ r := a.readOnlyRegistryCopy()
+ entry, exists := r[identifier]
+ if !exists {
+ return ""
+ }
+ return entry.Offset
+}
+
+// GetTailingMode returns the last committed tailing mode for a given identifier,
+// returns an empty string if it does not exist.
+func (a *RegistryAuditor) GetTailingMode(identifier string) string {
+ r := a.readOnlyRegistryCopy()
+ entry, exists := r[identifier]
+ if !exists {
+ return ""
+ }
+ return entry.TailingMode
+}
+
+// run keeps up to date the registry depending on different events
+func (a *RegistryAuditor) run() {
+ cleanUpTicker := time.NewTicker(defaultCleanupPeriod)
+ flushTicker := time.NewTicker(defaultFlushPeriod)
+ defer func() {
+ // clean the context
+ cleanUpTicker.Stop()
+ flushTicker.Stop()
+ a.done <- struct{}{}
+ }()
+
+ var fileError sync.Once
+ for {
+ select {
+ case <-a.health.C:
+ case payload, isOpen := <-a.inputChan:
+ if !isOpen {
+ // inputChan has been closed, no need to update the registry anymore
+ return
+ }
+ // update the registry with new entry
+ for _, msg := range payload.Messages {
+ a.updateRegistry(msg.Origin.Identifier, msg.Origin.Offset, msg.Origin.LogSource.Config.TailingMode, msg.IngestionTimestamp)
+ }
+ case <-cleanUpTicker.C:
+ // remove expired offsets from registry
+ a.cleanupRegistry()
+ case <-flushTicker.C:
+ // saves current registry into disk
+ err := a.flushRegistry()
+ if err != nil {
+ if os.IsPermission(err) || os.IsNotExist(err) {
+ fileError.Do(func() {
+ log.Warn(err)
+ })
+ } else {
+ log.Warn(err)
+ }
+ }
+ }
+ }
+}
+
+// recoverRegistry rebuilds the registry from the state file found at path
+func (a *RegistryAuditor) recoverRegistry() map[string]*RegistryEntry {
+ mr, err := os.ReadFile(a.registryPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ log.Infof("Could not find state file at %q, will start with default offsets", a.registryPath)
+ } else {
+ log.Error(err)
+ }
+ return make(map[string]*RegistryEntry)
+ }
+ r, err := a.unmarshalRegistry(mr)
+ if err != nil {
+ log.Error(err)
+ return make(map[string]*RegistryEntry)
+ }
+ return r
+}
+
+// cleanupRegistry removes expired entries from the registry
+func (a *RegistryAuditor) cleanupRegistry() {
+ a.registryMutex.Lock()
+ defer a.registryMutex.Unlock()
+ expireBefore := time.Now().UTC().Add(-a.entryTTL)
+ for path, entry := range a.registry {
+ if entry.LastUpdated.Before(expireBefore) {
+ log.Debugf("TTL for %s expired, removing from registry.", path)
+ delete(a.registry, path)
+ }
+ }
+}
+
+// updateRegistry updates the registry entry matching identifier with the new offset and timestamp
+func (a *RegistryAuditor) updateRegistry(identifier string, offset string, tailingMode string, ingestionTimestamp int64) {
+ a.registryMutex.Lock()
+ defer a.registryMutex.Unlock()
+ if identifier == "" {
+ // An empty Identifier means that we don't want to track down the offset
+ // This is useful for origins that don't have offsets (networks), or when we
+ // specifically want to avoid storing the offset
+ return
+ }
+
+ // Don't update the registry with a value older than the current one
+ // This can happen when dual shipping and 2 destinations are sending the same payload successfully
+ if v, ok := a.registry[identifier]; ok {
+ if v.IngestionTimestamp > ingestionTimestamp {
+ return
+ }
+ }
+
+ a.registry[identifier] = &RegistryEntry{
+ LastUpdated: time.Now().UTC(),
+ Offset: offset,
+ TailingMode: tailingMode,
+ IngestionTimestamp: ingestionTimestamp,
+ }
+}
+
+// readOnlyRegistryCopy returns a read only copy of the registry
+func (a *RegistryAuditor) readOnlyRegistryCopy() map[string]RegistryEntry {
+ a.registryMutex.Lock()
+ defer a.registryMutex.Unlock()
+ r := make(map[string]RegistryEntry)
+ for path, entry := range a.registry {
+ r[path] = *entry
+ }
+ return r
+}
+
+// flushRegistry writes on disk the registry at the given path
+func (a *RegistryAuditor) flushRegistry() error {
+ r := a.readOnlyRegistryCopy()
+ mr, err := a.marshalRegistry(r)
+ if err != nil {
+ return err
+ }
+ f, err := os.CreateTemp(a.registryDirPath, a.registryTmpFile)
+ if err != nil {
+ return err
+ }
+ tmpName := f.Name()
+ defer func() {
+ if err != nil {
+ _ = f.Close()
+ _ = os.Remove(tmpName)
+ }
+ }()
+ if _, err = f.Write(mr); err != nil {
+ return err
+ }
+
+ if err = f.Chmod(0644); err != nil {
+ return err
+ }
+
+ if err = f.Close(); err != nil {
+ return err
+ }
+ err = os.Rename(tmpName, a.registryPath)
+ return err
+}
+
+// marshalRegistry marshals a registry
+func (a *RegistryAuditor) marshalRegistry(registry map[string]RegistryEntry) ([]byte, error) {
+ r := JSONRegistry{
+ Version: registryAPIVersion,
+ Registry: registry,
+ }
+ return json.Marshal(r)
+}
+
+// unmarshalRegistry unmarshals a registry
+func (a *RegistryAuditor) unmarshalRegistry(b []byte) (map[string]*RegistryEntry, error) {
+ var r map[string]interface{}
+ err := json.Unmarshal(b, &r)
+ if err != nil {
+ return nil, err
+ }
+ version, exists := r["Version"].(float64)
+ if !exists {
+ return nil, fmt.Errorf("registry retrieved from disk must have a version number")
+ }
+ // ensure backward compatibility
+ switch int(version) {
+ case 2:
+ return unmarshalRegistryV2(b)
+ case 1:
+ return unmarshalRegistryV1(b)
+ case 0:
+ return unmarshalRegistryV0(b)
+ default:
+ return nil, fmt.Errorf("invalid registry version number")
+ }
+}
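A rough lifecycle sketch for the registry auditor; the run path, TTL and the health.RegisterLiveness wiring are assumptions about the caller, not something defined in this file:

package main

import (
	"time"

	"github.com/DataDog/datadog-agent/pkg/logs/auditor"
	"github.com/DataDog/datadog-agent/pkg/status/health"
)

func main() {
	// Entries untouched for longer than the TTL are dropped by the cleanup ticker.
	a := auditor.New("/opt/datadog-agent/run", auditor.DefaultRegistryFilename,
		23*time.Hour, health.RegisterLiveness("logs-agent-auditor")) // assumed health registration helper
	a.Start()
	defer a.Stop()

	// Senders push successfully delivered payloads here; the auditor persists
	// the newest offset per identifier to registry.json on every flush tick.
	_ = a.Channel()

	// Tailers read back their last committed position on startup.
	_ = a.GetOffset("file:/var/log/app.log")
}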
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/null_auditor.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/null_auditor.go
new file mode 100644
index 0000000000..5e6ff02f84
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/auditor/null_auditor.go
@@ -0,0 +1,64 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package auditor
+
+import (
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+// NullAuditor is an auditor that does nothing with the messages it receives.
+// It has been introduced for the Serverless Agent which doesn't need
+// to maintain a registry.
+type NullAuditor struct {
+ channel chan *message.Payload
+ stopChannel chan struct{}
+}
+
+// NewNullAuditor returns an instantiated NullAuditor. `Start()` is the next method
+// that should be used on this NullAuditor.
+func NewNullAuditor() *NullAuditor {
+ return &NullAuditor{
+ channel: make(chan *message.Payload),
+ stopChannel: make(chan struct{}),
+ }
+}
+
+// GetOffset returns an empty string.
+//
+//nolint:revive // TODO(AML) Fix revive linter
+func (a *NullAuditor) GetOffset(identifier string) string { return "" }
+
+// GetTailingMode returns an empty string.
+//
+//nolint:revive // TODO(AML) Fix revive linter
+func (a *NullAuditor) GetTailingMode(identifier string) string { return "" }
+
+// Start starts the NullAuditor main loop.
+func (a *NullAuditor) Start() {
+ go a.run()
+}
+
+// Stop stops the NullAuditor main loop.
+func (a *NullAuditor) Stop() {
+ a.stopChannel <- struct{}{}
+}
+
+// Channel returns the channel to which messages should be sent.
+func (a *NullAuditor) Channel() chan *message.Payload {
+ return a.channel
+}
+
+func (a *NullAuditor) run() {
+ for {
+ select {
+ case <-a.channel:
+ // draining the channel, we're not doing anything with the message
+ case <-a.stopChannel:
+ // TODO(remy): close the message channel
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/destination.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/destination.go
new file mode 100644
index 0000000000..b1bfa151bf
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/destination.go
@@ -0,0 +1,23 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package client
+
+import "github.com/DataDog/datadog-agent/pkg/logs/message"
+
+// Destination sends a payload to a specific endpoint over a given network protocol.
+type Destination interface {
+ // Whether or not the destination is used for Multi-Region Failover mode
+ IsMRF() bool
+
+ // Destination target (e.g. https://agent-intake.logs.datadoghq.com)
+ Target() string
+
+ // Start starts the destination send loop. Close the input channel to stop listening for payloads. stopChan is
+ // signaled when the destination has fully shut down and all buffered payloads have been flushed. isRetrying is
+ // signaled when the retry state changes. isRetrying can be nil if you don't need to handle retries.
+ Start(input chan *message.Payload, output chan *message.Payload, isRetrying chan bool) (stopChan <-chan struct{})
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/destinations.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/destinations.go
new file mode 100644
index 0000000000..536ae36e23
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/destinations.go
@@ -0,0 +1,20 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package client
+
+// Destinations encapsulates a set of log destinations, distinguishing reliable vs unreliable destinations
+type Destinations struct {
+ Reliable []Destination
+ Unreliable []Destination
+}
+
+// NewDestinations returns a new destinations composite.
+func NewDestinations(reliable []Destination, unreliable []Destination) *Destinations {
+ return &Destinations{
+ Reliable: reliable,
+ Unreliable: unreliable,
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/destinations_context.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/destinations_context.go
new file mode 100644
index 0000000000..c8bbfbefae
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/destinations_context.go
@@ -0,0 +1,49 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package client
+
+import (
+ "context"
+ "sync"
+)
+
+// A DestinationsContext manages senders and allows us to "unclog" the pipeline
+// when trying to stop it and failing to send messages.
+type DestinationsContext struct {
+ context context.Context
+ cancel context.CancelFunc
+ mutex sync.Mutex
+}
+
+// NewDestinationsContext returns an initialized DestinationsContext
+func NewDestinationsContext() *DestinationsContext {
+ return &DestinationsContext{}
+}
+
+// Start creates a context that will be cancelled on Stop()
+func (dc *DestinationsContext) Start() {
+ dc.mutex.Lock()
+ defer dc.mutex.Unlock()
+ dc.context, dc.cancel = context.WithCancel(context.Background())
+}
+
+// Stop cancels the context that should be used by all senders.
+func (dc *DestinationsContext) Stop() {
+ dc.mutex.Lock()
+ defer dc.mutex.Unlock()
+ if dc.cancel != nil {
+ dc.cancel()
+ dc.cancel = nil
+ }
+ // Here we keep the cancelled context to make sure in-flight destinations still get it.
+}
+
+// Context allows one to access the current context of this DestinationsContext.
+func (dc *DestinationsContext) Context() context.Context {
+ dc.mutex.Lock()
+ defer dc.mutex.Unlock()
+ return dc.context
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/errors.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/errors.go
new file mode 100644
index 0000000000..ae8cbd9ff0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/errors.go
@@ -0,0 +1,23 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package client
+
+// RetryableError represents an error that can occur when sending a payload.
+type RetryableError struct {
+ err error
+}
+
+// NewRetryableError returns a new retryable error wrapping the given error.
+func NewRetryableError(err error) *RetryableError {
+ return &RetryableError{
+ err: err,
+ }
+}
+
+// Error returns the message of the wrapped error.
+func (e *RetryableError) Error() string {
+ return e.err.Error()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/http/destination.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/http/destination.go
new file mode 100644
index 0000000000..954397f988
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/http/destination.go
@@ -0,0 +1,459 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package http
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "expvar"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
+ "github.com/DataDog/datadog-agent/pkg/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/util/backoff"
+ httputils "github.com/DataDog/datadog-agent/pkg/util/http"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/version"
+)
+
+// ContentType options.
+const (
+ TextContentType = "text/plain"
+ JSONContentType = "application/json"
+ ProtobufContentType = "application/x-protobuf"
+)
+
+// HTTP errors.
+var (
+ errClient = errors.New("client error")
+ errServer = errors.New("server error")
+ tlmSend = telemetry.NewCounter("logs_client_http_destination", "send", []string{"endpoint_host", "error"}, "Payloads sent")
+ tlmInUse = telemetry.NewCounter("logs_client_http_destination", "in_use_ms", []string{"sender"}, "Time spent sending payloads in ms")
+ tlmIdle = telemetry.NewCounter("logs_client_http_destination", "idle_ms", []string{"sender"}, "Time spent idle while not sending payloads in ms")
+ tlmDropped = telemetry.NewCounterWithOpts("logs_client_http_destination", "payloads_dropped", []string{}, "Number of payloads dropped because of unrecoverable errors", telemetry.Options{DefaultMetric: true})
+
+ expVarIdleMsMapKey = "idleMs"
+ expVarInUseMsMapKey = "inUseMs"
+)
+
+// emptyJsonPayload is an empty payload used to check HTTP connectivity without sending logs.
+//
+//nolint:revive // TODO(AML) Fix revive linter
+var emptyJsonPayload = message.Payload{Messages: []*message.Message{}, Encoded: []byte("{}")}
+
+// Destination sends a payload over HTTP.
+type Destination struct {
+ // Config
+ url string
+ endpoint config.Endpoint
+ contentType string
+ host string
+ client *httputils.ResetClient
+ destinationsContext *client.DestinationsContext
+ protocol config.IntakeProtocol
+ origin config.IntakeOrigin
+ isMRF bool
+
+ // Concurrency
+ climit chan struct{} // semaphore for limiting concurrent background sends
+ wg sync.WaitGroup
+
+ // Retry
+ backoff backoff.Policy
+ nbErrors int
+ retryLock sync.Mutex
+ shouldRetry bool
+ lastRetryError error
+
+ // Telemetry
+ expVars *expvar.Map
+ telemetryName string
+}
+
+// NewDestination returns a new Destination.
+// If `maxConcurrentBackgroundSends` > 0, then at most that many background payloads will be sent concurrently, else
+// there is no concurrency and the background sending pipeline will block while sending each payload.
+// TODO: add support for SOCKS5
+func NewDestination(endpoint config.Endpoint,
+ contentType string,
+ destinationsContext *client.DestinationsContext,
+ maxConcurrentBackgroundSends int,
+ shouldRetry bool,
+ telemetryName string,
+ cfg pkgconfigmodel.Reader) *Destination {
+
+ return newDestination(endpoint,
+ contentType,
+ destinationsContext,
+ time.Second*10,
+ maxConcurrentBackgroundSends,
+ shouldRetry,
+ telemetryName,
+ cfg)
+}
+
+func newDestination(endpoint config.Endpoint,
+ contentType string,
+ destinationsContext *client.DestinationsContext,
+ timeout time.Duration,
+ maxConcurrentBackgroundSends int,
+ shouldRetry bool,
+ telemetryName string,
+ cfg pkgconfigmodel.Reader) *Destination {
+
+ if maxConcurrentBackgroundSends <= 0 {
+ maxConcurrentBackgroundSends = 1
+ }
+ policy := backoff.NewExpBackoffPolicy(
+ endpoint.BackoffFactor,
+ endpoint.BackoffBase,
+ endpoint.BackoffMax,
+ endpoint.RecoveryInterval,
+ endpoint.RecoveryReset,
+ )
+
+ expVars := &expvar.Map{}
+ expVars.AddFloat(expVarIdleMsMapKey, 0)
+ expVars.AddFloat(expVarInUseMsMapKey, 0)
+ if telemetryName != "" {
+ metrics.DestinationExpVars.Set(telemetryName, expVars)
+ }
+
+ return &Destination{
+ host: endpoint.Host,
+ url: buildURL(endpoint),
+ endpoint: endpoint,
+ contentType: contentType,
+ client: httputils.NewResetClient(endpoint.ConnectionResetInterval, httpClientFactory(timeout, cfg)),
+ destinationsContext: destinationsContext,
+ climit: make(chan struct{}, maxConcurrentBackgroundSends),
+ wg: sync.WaitGroup{},
+ backoff: policy,
+ protocol: endpoint.Protocol,
+ origin: endpoint.Origin,
+ lastRetryError: nil,
+ retryLock: sync.Mutex{},
+ shouldRetry: shouldRetry,
+ expVars: expVars,
+ telemetryName: telemetryName,
+ isMRF: endpoint.IsMRF,
+ }
+}
+
+func errorToTag(err error) string {
+ if err == nil {
+ return "none"
+ } else if _, ok := err.(*client.RetryableError); ok {
+ return "retryable"
+ } else {
+ return "non-retryable"
+ }
+}
+
+// IsMRF indicates that this destination is a Multi-Region Failover destination.
+func (d *Destination) IsMRF() bool {
+ return d.isMRF
+}
+
+// Target is the address of the destination.
+func (d *Destination) Target() string {
+ return d.url
+}
+
+// Start starts reading the input channel
+func (d *Destination) Start(input chan *message.Payload, output chan *message.Payload, isRetrying chan bool) (stopChan <-chan struct{}) {
+ stop := make(chan struct{})
+ go d.run(input, output, stop, isRetrying)
+ return stop
+}
+
+func (d *Destination) run(input chan *message.Payload, output chan *message.Payload, stopChan chan struct{}, isRetrying chan bool) {
+ var startIdle = time.Now()
+
+ for p := range input {
+ idle := float64(time.Since(startIdle) / time.Millisecond)
+ d.expVars.AddFloat(expVarIdleMsMapKey, idle)
+ tlmIdle.Add(idle, d.telemetryName)
+ var startInUse = time.Now()
+
+ d.sendConcurrent(p, output, isRetrying)
+
+ inUse := float64(time.Since(startInUse) / time.Millisecond)
+ d.expVars.AddFloat(expVarInUseMsMapKey, inUse)
+ tlmInUse.Add(inUse, d.telemetryName)
+ startIdle = time.Now()
+ }
+ // Wait for any pending concurrent sends to finish or terminate
+ d.wg.Wait()
+
+ d.updateRetryState(nil, isRetrying)
+ stopChan <- struct{}{}
+}
+
+func (d *Destination) sendConcurrent(payload *message.Payload, output chan *message.Payload, isRetrying chan bool) {
+ d.wg.Add(1)
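+ // Acquire a slot on the semaphore: this blocks once maxConcurrentBackgroundSends payloads are already in flight, applying back-pressure on the input channel.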
+ d.climit <- struct{}{}
+ go func() {
+ defer func() {
+ <-d.climit
+ d.wg.Done()
+ }()
+ d.sendAndRetry(payload, output, isRetrying)
+ }()
+}
+
+// sendAndRetry sends a payload over HTTP, retrying with backoff on retryable errors when retries are enabled.
+func (d *Destination) sendAndRetry(payload *message.Payload, output chan *message.Payload, isRetrying chan bool) {
+ for {
+
+ d.retryLock.Lock()
+ nbErrors := d.nbErrors
+ d.retryLock.Unlock()
+ backoffDuration := d.backoff.GetBackoffDuration(nbErrors)
+ blockedUntil := time.Now().Add(backoffDuration)
+ if blockedUntil.After(time.Now()) {
+ log.Warnf("%s: sleeping until %v before retrying. Backoff duration %s due to %d errors", d.url, blockedUntil, backoffDuration.String(), nbErrors)
+ d.waitForBackoff(blockedUntil)
+ metrics.RetryTimeSpent.Add(int64(backoffDuration))
+ metrics.RetryCount.Add(1)
+ metrics.TlmRetryCount.Add(1)
+ }
+
+ err := d.unconditionalSend(payload)
+
+ if err != nil {
+ metrics.DestinationErrors.Add(1)
+ metrics.TlmDestinationErrors.Inc()
+
+ // shouldRetry is false for serverless. This log line is too verbose for serverless so make it debug only.
+ if d.shouldRetry {
+ log.Warnf("Could not send payload: %v", err)
+ } else {
+ log.Debugf("Could not send payload: %v", err)
+ }
+ }
+
+ if err == context.Canceled {
+ d.updateRetryState(nil, isRetrying)
+ return
+ }
+
+ if d.shouldRetry {
+ if d.updateRetryState(err, isRetrying) {
+ continue
+ }
+ }
+
+ metrics.LogsSent.Add(int64(len(payload.Messages)))
+ metrics.TlmLogsSent.Add(float64(len(payload.Messages)))
+ output <- payload
+ return
+ }
+}
+
+func (d *Destination) unconditionalSend(payload *message.Payload) (err error) {
+ defer func() {
+ tlmSend.Inc(d.host, errorToTag(err))
+ }()
+
+ ctx := d.destinationsContext.Context()
+
+ if err != nil {
+ return err
+ }
+ metrics.BytesSent.Add(int64(payload.UnencodedSize))
+ metrics.TlmBytesSent.Add(float64(payload.UnencodedSize))
+ metrics.EncodedBytesSent.Add(int64(len(payload.Encoded)))
+ metrics.TlmEncodedBytesSent.Add(float64(len(payload.Encoded)))
+
+ req, err := http.NewRequest("POST", d.url, bytes.NewReader(payload.Encoded))
+ if err != nil {
+ // the request could not be built;
+ // this can happen when the method or the URL are not valid.
+ return err
+ }
+ req.Header.Set("DD-API-KEY", d.endpoint.GetAPIKey())
+ req.Header.Set("Content-Type", d.contentType)
+ req.Header.Set("User-Agent", fmt.Sprintf("datadog-agent/%s", version.AgentVersion))
+
+ if payload.Encoding != "" {
+ req.Header.Set("Content-Encoding", payload.Encoding)
+ }
+ if d.protocol != "" {
+ req.Header.Set("DD-PROTOCOL", string(d.protocol))
+ }
+ if d.origin != "" {
+ req.Header.Set("DD-EVP-ORIGIN", string(d.origin))
+ req.Header.Set("DD-EVP-ORIGIN-VERSION", version.AgentVersion)
+ }
+ req.Header.Set("dd-message-timestamp", strconv.FormatInt(getMessageTimestamp(payload.Messages), 10))
+ then := time.Now()
+ req.Header.Set("dd-current-timestamp", strconv.FormatInt(then.UnixMilli(), 10))
+
+ req = req.WithContext(ctx)
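+ // Attaching the destinations context lets DestinationsContext.Stop() cancel this request while it is in flight.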
+ resp, err := d.client.Do(req)
+
+ latency := time.Since(then).Milliseconds()
+ metrics.TlmSenderLatency.Observe(float64(latency))
+ metrics.SenderLatency.Set(latency)
+
+ if err != nil {
+ if ctx.Err() == context.Canceled {
+ return ctx.Err()
+ }
+ // most likely a network or a connect error, the callee should retry.
+ return client.NewRetryableError(err)
+ }
+
+ defer resp.Body.Close()
+ response, err := io.ReadAll(resp.Body)
+ if err != nil {
+ // the read failed because the server closed or terminated the connection
+ // *after* serving the request.
+ log.Debugf("Server closed or terminated the connection after serving the request with err %v", err)
+ return err
+ }
+
+ metrics.DestinationHttpRespByStatusAndUrl.Add(strconv.Itoa(resp.StatusCode), 1)
+ metrics.TlmDestinationHttpRespByStatusAndUrl.Inc(strconv.Itoa(resp.StatusCode), d.url)
+
+ if resp.StatusCode >= http.StatusBadRequest {
+ log.Warnf("failed to post http payload. code=%d host=%s response=%s", resp.StatusCode, d.host, string(response))
+ }
+ if resp.StatusCode == http.StatusBadRequest ||
+ resp.StatusCode == http.StatusUnauthorized ||
+ resp.StatusCode == http.StatusForbidden ||
+ resp.StatusCode == http.StatusRequestEntityTooLarge {
+ // the logs-agent is likely to be misconfigured,
+ // the URL or the API key may be wrong.
+ tlmDropped.Inc()
+ return errClient
+ } else if resp.StatusCode > http.StatusBadRequest {
+ // the server could not serve the request, most likely because of an
+ // internal error. We should retry these requests.
+ return client.NewRetryableError(errServer)
+ } else {
+ return nil
+ }
+}
+
+func (d *Destination) updateRetryState(err error, isRetrying chan bool) bool {
+ d.retryLock.Lock()
+ defer d.retryLock.Unlock()
+
+ if _, ok := err.(*client.RetryableError); ok {
+ d.nbErrors = d.backoff.IncError(d.nbErrors)
+ if isRetrying != nil && d.lastRetryError == nil {
+ isRetrying <- true
+ }
+ d.lastRetryError = err
+
+ return true
+ } else { //nolint:revive // TODO(AML) Fix revive linter
+ d.nbErrors = d.backoff.DecError(d.nbErrors)
+ if isRetrying != nil && d.lastRetryError != nil {
+ isRetrying <- false
+ }
+ d.lastRetryError = nil
+
+ return false
+ }
+}
+
+func httpClientFactory(timeout time.Duration, cfg pkgconfigmodel.Reader) func() *http.Client {
+ return func() *http.Client {
+ return &http.Client{
+ Timeout: timeout,
+ // reusing core agent HTTP transport to benefit from proxy settings.
+ Transport: httputils.CreateHTTPTransport(cfg),
+ }
+ }
+}
+
+// buildURL builds a URL from a config endpoint.
+func buildURL(endpoint config.Endpoint) string {
+ var scheme string
+ if endpoint.UseSSL() {
+ scheme = "https"
+ } else {
+ scheme = "http"
+ }
+ var address string
+ if endpoint.Port != 0 {
+ address = fmt.Sprintf("%v:%v", endpoint.Host, endpoint.Port)
+ } else {
+ address = endpoint.Host
+ }
+ url := url.URL{
+ Scheme: scheme,
+ Host: address,
+ }
+ if endpoint.Version == config.EPIntakeVersion2 && endpoint.TrackType != "" {
+ url.Path = fmt.Sprintf("/api/v2/%s", endpoint.TrackType)
+ } else {
+ url.Path = "/v1/input"
+ }
+ return url.String()
+}
+
+func getMessageTimestamp(messages []*message.Message) int64 {
+ timestampNanos := int64(-1)
+ if len(messages) > 0 {
+ timestampNanos = messages[len(messages)-1].IngestionTimestamp
+ }
+ return timestampNanos / int64(time.Millisecond/time.Nanosecond)
+}
+
+func prepareCheckConnectivity(endpoint config.Endpoint, cfg pkgconfigmodel.Reader) (*client.DestinationsContext, *Destination) {
+ ctx := client.NewDestinationsContext()
+ // Lower the timeout to 5s because the HTTP connectivity test is done synchronously during the agent bootstrap sequence
+ destination := newDestination(endpoint, JSONContentType, ctx, time.Second*5, 0, false, "", cfg)
+ return ctx, destination
+}
+
+func completeCheckConnectivity(ctx *client.DestinationsContext, destination *Destination) error {
+ ctx.Start()
+ defer ctx.Stop()
+ return destination.unconditionalSend(&emptyJsonPayload)
+}
+
+// CheckConnectivity checks whether sending logs through HTTP works
+func CheckConnectivity(endpoint config.Endpoint, cfg pkgconfigmodel.Reader) config.HTTPConnectivity {
+ log.Info("Checking HTTP connectivity...")
+ ctx, destination := prepareCheckConnectivity(endpoint, cfg)
+ log.Infof("Sending HTTP connectivity request to %s...", destination.url)
+ err := completeCheckConnectivity(ctx, destination)
+ if err != nil {
+ log.Warnf("HTTP connectivity failure: %v", err)
+ } else {
+ log.Info("HTTP connectivity successful")
+ }
+ return err == nil
+}
+
+//nolint:revive // TODO(AML) Fix revive linter
+func CheckConnectivityDiagnose(endpoint config.Endpoint, cfg pkgconfigmodel.Reader) (url string, err error) {
+ ctx, destination := prepareCheckConnectivity(endpoint, cfg)
+ return destination.url, completeCheckConnectivity(ctx, destination)
+}
+
+func (d *Destination) waitForBackoff(blockedUntil time.Time) {
+ ctx, cancel := context.WithDeadline(d.destinationsContext.Context(), blockedUntil)
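+ // The wait below ends at the deadline, or earlier if the destinations context is cancelled, so shutdown is not delayed by the backoff sleep.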
+ defer cancel()
+ <-ctx.Done()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/http/test_utils.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/http/test_utils.go
new file mode 100644
index 0000000000..98dea19207
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/http/test_utils.go
@@ -0,0 +1,106 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package http
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
+)
+
+// StatusCodeContainer is a lock around the status code to return
+type StatusCodeContainer struct {
+ sync.Mutex
+ statusCode int
+}
+
+// TestServer a test server
+type TestServer struct {
+ httpServer *httptest.Server
+ DestCtx *client.DestinationsContext
+ Destination *Destination
+ Endpoint config.Endpoint
+ request *http.Request
+ statusCodeContainer *StatusCodeContainer
+ stopChan chan struct{}
+}
+
+// NewTestServer creates a new test server
+func NewTestServer(statusCode int, cfg pkgconfigmodel.Reader) *TestServer {
+ return NewTestServerWithOptions(statusCode, 0, true, nil, cfg)
+}
+
+// NewTestServerWithOptions creates a new test server with concurrency and response control
+func NewTestServerWithOptions(statusCode int, senders int, retryDestination bool, respondChan chan int, cfg pkgconfigmodel.Reader) *TestServer {
+ statusCodeContainer := &StatusCodeContainer{statusCode: statusCode}
+ var request http.Request
+ var mu = sync.Mutex{}
+ var stopChan = make(chan struct{}, 1)
+ stopped := false
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ statusCodeContainer.Lock()
+ code := statusCodeContainer.statusCode
+ w.WriteHeader(statusCodeContainer.statusCode)
+ statusCodeContainer.Unlock()
+ mu.Lock()
+ if stopped {
+ mu.Unlock()
+ return
+ }
+
+ request = *r
+ if respondChan != nil {
+ select {
+ case respondChan <- code:
+ case <-stopChan:
+ stopped = true
+ }
+ }
+ mu.Unlock()
+ }))
+ url := strings.Split(ts.URL, ":")
+ port, _ := strconv.Atoi(url[2])
+ destCtx := client.NewDestinationsContext()
+ destCtx.Start()
+
+ endpoint := config.NewEndpoint("test", strings.Replace(url[1], "/", "", -1), port, false)
+ endpoint.BackoffFactor = 1
+ endpoint.BackoffBase = 1
+ endpoint.BackoffMax = 10
+ endpoint.RecoveryInterval = 1
+
+ dest := NewDestination(endpoint, JSONContentType, destCtx, senders, retryDestination, "test", cfg)
+ return &TestServer{
+ httpServer: ts,
+ DestCtx: destCtx,
+ Destination: dest,
+ Endpoint: endpoint,
+ request: &request,
+ statusCodeContainer: statusCodeContainer,
+ stopChan: stopChan,
+ }
+}
+
+// Stop stops the server
+func (s *TestServer) Stop() {
+ s.stopChan <- struct{}{}
+ s.DestCtx.Stop()
+ s.httpServer.Close()
+}
+
+// ChangeStatus changes the status to return
+func (s *TestServer) ChangeStatus(statusCode int) {
+ s.statusCodeContainer.Lock()
+ s.statusCodeContainer.statusCode = statusCode
+ s.statusCodeContainer.Unlock()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/connection_manager.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/connection_manager.go
new file mode 100644
index 0000000000..39b4ce841d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/connection_manager.go
@@ -0,0 +1,200 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package tcp
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ "golang.org/x/net/proxy"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ "github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+const (
+ maxExpBackoffCount = 7
+ connectionTimeout = 20 * time.Second
+ statusConnectionError = "connection_error"
+)
+
+// A ConnectionManager manages connections
+type ConnectionManager struct {
+ endpoint config.Endpoint
+ mutex sync.Mutex
+ firstConn sync.Once
+ status statusinterface.Status
+}
+
+// NewConnectionManager returns an initialized ConnectionManager
+func NewConnectionManager(endpoint config.Endpoint, status statusinterface.Status) *ConnectionManager {
+ return &ConnectionManager{
+ endpoint: endpoint,
+ status: status,
+ }
+}
+
+type tlsTimeoutError struct{}
+
+func (tlsTimeoutError) Error() string {
+ return "tls: Handshake timed out"
+}
+
+// NewConnection returns an initialized connection to the intake.
+// It blocks until a connection is available
+func (cm *ConnectionManager) NewConnection(ctx context.Context) (net.Conn, error) {
+ cm.mutex.Lock()
+ defer cm.mutex.Unlock()
+
+ cm.firstConn.Do(func() {
+ if cm.endpoint.ProxyAddress != "" {
+ log.Infof("Connecting to the backend: %v, via socks5: %v, with SSL: %v", cm.address(), cm.endpoint.ProxyAddress, cm.endpoint.UseSSL())
+ } else {
+ log.Infof("Connecting to the backend: %v, with SSL: %v", cm.address(), cm.endpoint.UseSSL())
+ }
+ })
+
+ var retries uint
+ var err error
+ for {
+ if err != nil {
+ cm.status.AddGlobalWarning(statusConnectionError, fmt.Sprintf("Connection to the log intake cannot be established: %v", err))
+ }
+ if retries > 0 {
+ log.Debugf("Connect attempt #%d", retries)
+ cm.backoff(ctx, retries)
+ }
+ retries++
+
+ // Check if we should continue.
+ select {
+ // This is the normal shutdown path when the caller is stopped.
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ // Continue.
+ }
+
+ var conn net.Conn
+ if cm.endpoint.ProxyAddress != "" {
+ var dialer proxy.Dialer
+ dialer, err = proxy.SOCKS5("tcp", cm.endpoint.ProxyAddress, nil, proxy.Direct)
+ if err != nil {
+ log.Warn(err)
+ continue
+ }
+ // TODO: handle timeouts with ctx.
+ conn, err = dialer.Dial("tcp", cm.address())
+ } else {
+ var dialer net.Dialer
+ dctx, cancel := context.WithTimeout(ctx, connectionTimeout)
+ defer cancel()
+ conn, err = dialer.DialContext(dctx, "tcp", cm.address())
+ }
+ if err != nil {
+ log.Warn(err)
+ continue
+ }
+ log.Debugf("connected to %v", cm.address())
+
+ if cm.endpoint.UseSSL() {
+ sslConn := tls.Client(conn, &tls.Config{
+ ServerName: cm.endpoint.Host,
+ })
+ err = cm.handshakeWithTimeout(sslConn, connectionTimeout)
+ if err != nil {
+ log.Warn(err)
+ continue
+ }
+ log.Debug("SSL handshake successful")
+ conn = sslConn
+ }
+
+ go cm.handleServerClose(conn)
+ cm.status.RemoveGlobalWarning(statusConnectionError)
+ return conn, nil
+ }
+}
+
+func (cm *ConnectionManager) handshakeWithTimeout(conn *tls.Conn, timeout time.Duration) error {
+ errChannel := make(chan error, 2)
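+ // The channel is buffered (size 2) so whichever of the timer or the handshake goroutine finishes second does not block and leak.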
+ time.AfterFunc(timeout, func() {
+ errChannel <- tlsTimeoutError{}
+ })
+ go func() {
+ log.Debug("Start TLS handshake")
+ errChannel <- conn.Handshake()
+ log.Debug("TLS handshake ended")
+ }()
+ return <-errChannel
+}
+
+// address returns the address of the server to send logs to.
+func (cm *ConnectionManager) address() string {
+ return net.JoinHostPort(cm.endpoint.Host, strconv.Itoa(cm.endpoint.Port))
+}
+
+// ShouldReset returns whether the connection should be reset, depending on the endpoint's config
+// and the passed connection creation time.
+func (cm *ConnectionManager) ShouldReset(connCreationTime time.Time) bool {
+ return cm.endpoint.ConnectionResetInterval != 0 && time.Since(connCreationTime) > cm.endpoint.ConnectionResetInterval
+}
+
+// CloseConnection closes a connection on the client side
+func (cm *ConnectionManager) CloseConnection(conn net.Conn) {
+ conn.Close()
+ log.Debug("Connection closed")
+}
+
+// handleServerClose lets the connection manager detect when a connection
+// has been closed by the server, and closes it for the client.
+// This is not strictly necessary but a good safeguard against callers
+// that might not handle errors properly.
+func (cm *ConnectionManager) handleServerClose(conn net.Conn) {
+ for {
+ buff := make([]byte, 1)
+ _, err := conn.Read(buff)
+ switch {
+ case err == nil:
+ case errors.Is(err, net.ErrClosed):
+ // Connection already closed, expected
+ return
+ case err == io.EOF:
+ cm.CloseConnection(conn)
+ return
+ default:
+ log.Warn(err)
+ return
+ }
+ }
+}
+
+// backoff implements a randomized exponential backoff in case of connection failure.
+// Each invocation triggers a sleep in the range [2^(retries-1), 2^retries) seconds;
+// the exponent is capped at 7, which translates to a max sleep between ~1min and ~2min.
+func (cm *ConnectionManager) backoff(ctx context.Context, retries uint) {
+ if retries > maxExpBackoffCount {
+ retries = maxExpBackoffCount
+ }
+
+ backoffMax := 1 << retries
+ backoffMin := 1 << (retries - 1)
+ backoffDuration := time.Duration(backoffMin+rand.Intn(backoffMax-backoffMin)) * time.Second
+
+ ctx, cancel := context.WithTimeout(ctx, backoffDuration)
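+ // Waiting on the derived context below returns after backoffDuration, or earlier if the parent context is cancelled during shutdown.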
+ defer cancel()
+ <-ctx.Done()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/delimiter.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/delimiter.go
new file mode 100644
index 0000000000..4d9b402bd9
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/delimiter.go
@@ -0,0 +1,55 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package tcp
+
+import (
+ "encoding/binary"
+)
+
+// Delimiter is responsible for adding delimiters to the frames being sent.
+type Delimiter interface {
+ delimit(content []byte) ([]byte, error)
+}
+
+// NewDelimiter returns a delimiter.
+func NewDelimiter(useProto bool) Delimiter {
+ if useProto {
+ return lengthPrefixDelimiter{}
+ }
+ return lineBreakDelimiter{}
+}
+
+// lengthPrefixDelimiter is a delimiter that prepends the length of each message as an unsigned 32-bit integer, encoded in
+// binary (big-endian).
+//
+// For example:
+// BEFORE ENCODE (300 bytes) AFTER ENCODE (302 bytes)
+// +---------------+ +--------+---------------+
+// | Raw Data |-------------->| Length | Raw Data |
+// | (300 bytes) | | 0xAC02 | (300 bytes) |
+// +---------------+ +--------+---------------+
+type lengthPrefixDelimiter struct{}
+
+func (l lengthPrefixDelimiter) delimit(content []byte) ([]byte, error) {
+ buf := make([]byte, 4+len(content))
+ binary.BigEndian.PutUint32(buf[:4], uint32(len(content)))
+ copy(buf[4:], content)
+ return buf, nil
+}
+
+// lineBreakDelimiter is a delimiter that appends a line break after each message.
+//
+// For example:
+// BEFORE ENCODE (300 bytes) AFTER ENCODE (301 bytes)
+// +---------------+ +---------------+------------+
+// | Raw Data |-------------->| Raw Data | Line Break |
+// | (300 bytes) | | (300 bytes) | 0x0A |
+// +---------------+ +---------------+------------+
+type lineBreakDelimiter struct{}
+
+func (l lineBreakDelimiter) delimit(content []byte) ([]byte, error) {
+ return append(content, '\n'), nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/destination.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/destination.go
new file mode 100644
index 0000000000..f0ec9c1520
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/destination.go
@@ -0,0 +1,155 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package tcp
+
+import (
+ "expvar"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
+ "github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// Destination is responsible for shipping logs to a remote server over TCP.
+type Destination struct {
+ prefixer *prefixer
+ delimiter Delimiter
+ connManager *ConnectionManager
+ destinationsContext *client.DestinationsContext
+ conn net.Conn
+ connCreationTime time.Time
+ shouldRetry bool
+ retryLock sync.Mutex
+ lastRetryError error
+ isMRF bool
+}
+
+// NewDestination returns a new destination.
+func NewDestination(endpoint config.Endpoint, useProto bool, destinationsContext *client.DestinationsContext, shouldRetry bool, status statusinterface.Status) *Destination {
+ metrics.DestinationLogsDropped.Set(endpoint.Host, &expvar.Int{})
+ return &Destination{
+ prefixer: newPrefixer(endpoint.GetAPIKey),
+ delimiter: NewDelimiter(useProto),
+ connManager: NewConnectionManager(endpoint, status),
+ destinationsContext: destinationsContext,
+ retryLock: sync.Mutex{},
+ shouldRetry: shouldRetry,
+ lastRetryError: nil,
+ isMRF: endpoint.IsMRF,
+ }
+}
+
+// IsMRF indicates that this destination is a Multi-Region Failover destination.
+func (d *Destination) IsMRF() bool {
+ return d.isMRF
+}
+
+// Target is the address of the destination.
+func (d *Destination) Target() string {
+ return d.connManager.address()
+}
+
+// Start reads from the input, transforms each message into a frame and sends it to the remote server.
+func (d *Destination) Start(input chan *message.Payload, output chan *message.Payload, isRetrying chan bool) (stopChan <-chan struct{}) {
+ stop := make(chan struct{})
+ go func() {
+ for payload := range input {
+ d.sendAndRetry(payload, output, isRetrying)
+ }
+ d.updateRetryState(nil, isRetrying)
+ stop <- struct{}{}
+ }()
+ return stop
+}
+
+func (d *Destination) sendAndRetry(payload *message.Payload, output chan *message.Payload, isRetrying chan bool) {
+ for {
+ if d.conn == nil {
+ var err error
+
+ // We work only if we have a started destination context
+ ctx := d.destinationsContext.Context()
+ if d.conn, err = d.connManager.NewConnection(ctx); err != nil {
+ // the connection manager is not meant to fail,
+ // this can happen only when the context is cancelled.
+ d.incrementErrors(true)
+ return
+ }
+ d.connCreationTime = time.Now()
+ }
+
+ content := d.prefixer.apply(payload.Encoded)
+ frame, err := d.delimiter.delimit(content)
+ if err != nil {
+ // the delimiter can fail when the payload can not be framed correctly.
+ d.incrementErrors(true)
+ return
+ }
+
+ _, err = d.conn.Write(frame)
+ if err != nil {
+ d.connManager.CloseConnection(d.conn)
+ d.conn = nil
+
+ if d.shouldRetry {
+ d.updateRetryState(err, isRetrying)
+ d.incrementErrors(false)
+ // retry (will try to open a new connection)
+ continue
+ }
+ d.incrementErrors(true)
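+ // Retries are disabled: count the payload as dropped, but still fall through so it is forwarded on the output channel and the pipeline keeps moving.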
+ }
+
+ d.updateRetryState(nil, isRetrying)
+
+ metrics.LogsSent.Add(1)
+ metrics.TlmLogsSent.Inc()
+ metrics.BytesSent.Add(int64(payload.UnencodedSize))
+ metrics.TlmBytesSent.Add(float64(payload.UnencodedSize))
+ metrics.EncodedBytesSent.Add(int64(len(payload.Encoded)))
+ metrics.TlmEncodedBytesSent.Add(float64(len(payload.Encoded)))
+ output <- payload
+
+ if d.connManager.ShouldReset(d.connCreationTime) {
+ log.Debug("Resetting TCP connection")
+ d.connManager.CloseConnection(d.conn)
+ d.conn = nil
+ }
+ return
+ }
+}
+
+func (d *Destination) incrementErrors(drop bool) {
+ if drop {
+ host := d.connManager.endpoint.Host
+ metrics.DestinationLogsDropped.Add(host, 1)
+ metrics.TlmLogsDropped.Inc(host)
+ }
+ metrics.DestinationErrors.Add(1)
+ metrics.TlmDestinationErrors.Inc()
+}
+
+func (d *Destination) updateRetryState(err error, isRetrying chan bool) {
+ d.retryLock.Lock()
+ defer d.retryLock.Unlock()
+
+ if err != nil {
+ if isRetrying != nil && d.lastRetryError == nil {
+ isRetrying <- true
+ }
+ } else {
+ if isRetrying != nil && d.lastRetryError != nil {
+ isRetrying <- false
+ }
+ }
+ d.lastRetryError = err
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/prefixer.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/prefixer.go
new file mode 100644
index 0000000000..8b035564d2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/prefixer.go
@@ -0,0 +1,30 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package tcp
+
+import "bytes"
+
+// prefixer prepends a prefix to a message.
+type prefixer struct {
+ get func() string
+ buffer bytes.Buffer
+}
+
+// newPrefixer returns a prefixer that fetches the prefix and prepends it to a given message each time apply is called.
+func newPrefixer(getter func() string) *prefixer {
+ return &prefixer{
+ get: getter,
+ }
+}
+
+// apply prepends the prefix and a space to the message.
+func (p *prefixer) apply(content []byte) []byte {
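+ // Note: the internal buffer is reused across calls, so the returned slice is only valid until the next call to apply.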
+ p.buffer.Reset()
+ p.buffer.WriteString(p.get())
+ p.buffer.WriteByte(' ')
+ p.buffer.Write(content)
+ return p.buffer.Bytes()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/test_utils.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/test_utils.go
new file mode 100644
index 0000000000..6bfbc3b193
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/client/tcp/test_utils.go
@@ -0,0 +1,36 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package tcp
+
+import (
+ "net"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
+ "github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
+)
+
+// AddrToHostPort converts a net.Addr to a (string, int).
+func AddrToHostPort(remoteAddr net.Addr) (string, int) {
+ switch addr := remoteAddr.(type) {
+ case *net.UDPAddr:
+ return addr.IP.String(), addr.Port
+ case *net.TCPAddr:
+ return addr.IP.String(), addr.Port
+ }
+ return "", 0
+}
+
+// AddrToEndPoint creates an EndPoint from an Addr.
+func AddrToEndPoint(addr net.Addr) config.Endpoint {
+ host, port := AddrToHostPort(addr)
+ return config.NewEndpoint("", host, port, false)
+}
+
+// AddrToDestination creates a Destination from an Addr
+func AddrToDestination(addr net.Addr, ctx *client.DestinationsContext, status statusinterface.Status) *Destination {
+ return NewDestination(AddrToEndPoint(addr), true, ctx, true, status)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/format.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/format.go
new file mode 100644
index 0000000000..324404cc33
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/format.go
@@ -0,0 +1,52 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package diagnostic
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+// Formatter defines how a particular message.Message should be formatted
+type Formatter interface {
+ // Format transforms the supplied message.Message into a more user-friendly format, for diagnostic purposes.
+ Format(*message.Message, string, []byte) string
+}
+
+// logFormatter is the default Formatter which supports transforming log pipeline messages into a more useful format.
+type logFormatter struct {
+ hostname hostnameinterface.Component
+}
+
+//nolint:revive // TODO(AML) Fix revive linter
+func (l *logFormatter) Format(m *message.Message, eventType string, redactedMsg []byte) string {
+ hname, err := l.hostname.Get(context.TODO())
+ if err != nil {
+ hname = "unknown"
+ }
+
+ ts := time.Now().UTC()
+ // TODO(remy): should we consider renaming the "Timestamp: %s" to mention
+ // it's only concerning the serverless agent?
+ if !m.ServerlessExtra.Timestamp.IsZero() {
+ ts = m.ServerlessExtra.Timestamp
+ }
+
+ return fmt.Sprintf("Integration Name: %s | Type: %s | Status: %s | Timestamp: %s | Hostname: %s | Service: %s | Source: %s | Tags: %s | Message: %s\n",
+ m.Origin.LogSource.Name,
+ m.Origin.LogSource.Config.Type,
+ m.GetStatus(),
+ ts,
+ hname,
+ m.Origin.Service(),
+ m.Origin.Source(),
+ m.TagsToString(),
+ string(redactedMsg))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/message_receiver.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/message_receiver.go
new file mode 100644
index 0000000000..6a08dddc22
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/message_receiver.go
@@ -0,0 +1,153 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package diagnostic
+
+import (
+ "sync"
+
+ "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface"
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+// MessageReceiver interface to handle messages for diagnostics
+type MessageReceiver interface {
+ HandleMessage(*message.Message, []byte, string)
+}
+
+type messagePair struct {
+ msg *message.Message
+ rendered []byte
+ eventType string
+}
+
+// BufferedMessageReceiver handles incoming log messages and makes them available for diagnostics
+type BufferedMessageReceiver struct {
+ inputChan chan messagePair
+ enabled bool
+ m sync.RWMutex
+ formatter Formatter
+}
+
+// Filters for processing log messages
+type Filters struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Source string `json:"source"`
+ Service string `json:"service"`
+}
+
+// NewBufferedMessageReceiver creates a new MessageReceiver. It takes an optional Formatter as a parameter, and defaults
+// to using logFormatter if not supplied.
+func NewBufferedMessageReceiver(f Formatter, hostname hostnameinterface.Component) *BufferedMessageReceiver {
+ if f == nil {
+ f = &logFormatter{
+ hostname: hostname,
+ }
+ }
+ return &BufferedMessageReceiver{
+ inputChan: make(chan messagePair, config.ChanSize),
+ formatter: f,
+ }
+}
+
+// Start opens a new input channel
+func (b *BufferedMessageReceiver) Start() {
+ b.inputChan = make(chan messagePair, config.ChanSize)
+}
+
+// Stop closes the input channel
+func (b *BufferedMessageReceiver) Stop() {
+ close(b.inputChan)
+}
+
+// Clear empties buffered messages
+func (b *BufferedMessageReceiver) clear() {
+ l := len(b.inputChan)
+ for i := 0; i < l; i++ {
+ <-b.inputChan
+ }
+}
+
+// SetEnabled starts or stops collecting log messages for diagnostics. Returns true if the state was successfully changed
+func (b *BufferedMessageReceiver) SetEnabled(e bool) bool {
+ b.m.Lock()
+ defer b.m.Unlock()
+
+ if b.enabled == e {
+ return false
+ }
+
+ b.enabled = e
+ if !e {
+ b.clear()
+ }
+ return true
+}
+
+// IsEnabled returns the enabled state of the message receiver
+func (b *BufferedMessageReceiver) IsEnabled() bool {
+ b.m.RLock()
+ defer b.m.RUnlock()
+ return b.enabled
+}
+
+// HandleMessage buffers a message for diagnostic processing
+func (b *BufferedMessageReceiver) HandleMessage(m *message.Message, rendered []byte, eventType string) {
+ if !b.IsEnabled() {
+ return
+ }
+ b.inputChan <- messagePair{
+ msg: m,
+ rendered: rendered,
+ eventType: eventType,
+ }
+}
+
+// Filter writes the buffered events from the input channel formatted as a string to the output channel
+func (b *BufferedMessageReceiver) Filter(filters *Filters, done <-chan struct{}) <-chan string {
+ out := make(chan string, config.ChanSize)
+ go func() {
+ defer close(out)
+ for {
+ select {
+ case msgPair := <-b.inputChan:
+ if shouldHandleMessage(&msgPair, filters) {
+ out <- b.formatter.Format(msgPair.msg, msgPair.eventType, msgPair.rendered)
+ }
+ case <-done:
+ return
+ }
+ }
+ }()
+ return out
+}
+
+func shouldHandleMessage(m *messagePair, filters *Filters) bool {
+ if filters == nil {
+ return true
+ }
+
+ shouldHandle := true
+
+ if filters.Name != "" {
+ shouldHandle = shouldHandle && m.msg.Origin != nil && m.msg.Origin.LogSource.Name == filters.Name
+ }
+
+ if filters.Type != "" {
+ shouldHandle = shouldHandle && ((m.msg.Origin != nil && m.msg.Origin.LogSource.Config.Type == filters.Type) || m.eventType == filters.Type)
+ }
+
+ if filters.Source != "" {
+ shouldHandle = shouldHandle && m.msg.Origin != nil && filters.Source == m.msg.Origin.Source()
+ }
+
+ if filters.Service != "" {
+ shouldHandle = shouldHandle && m.msg.Origin != nil && filters.Service == m.msg.Origin.Service()
+ }
+
+ return shouldHandle
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/noop_message_receiver.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/noop_message_receiver.go
new file mode 100644
index 0000000000..3621637fb7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/diagnostic/noop_message_receiver.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package diagnostic
+
+import "github.com/DataDog/datadog-agent/pkg/logs/message"
+
+// NoopMessageReceiver for cases where diagnosing messages is unsupported or not needed (serverless, tests)
+type NoopMessageReceiver struct{}
+
+// HandleMessage does nothing with the message
+func (n *NoopMessageReceiver) HandleMessage(_ *message.Message, _ []byte, _ string) {
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/message.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/message.go
new file mode 100644
index 0000000000..71e45c1bed
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/message.go
@@ -0,0 +1,318 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package message
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/sources"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// Payload represents an encoded collection of messages ready to be sent to the intake
+type Payload struct {
+ // The slice of sources messages encoded in the payload
+ Messages []*Message
+ // The encoded bytes to be sent to the intake (sometimes compressed)
+ Encoded []byte
+ // The content encoding. A header for HTTP, empty for TCP
+ Encoding string
+ // The size of the unencoded payload
+ UnencodedSize int
+}
+
+// Message represents a log line sent to datadog, with its metadata
+type Message struct {
+ MessageContent
+ Hostname string
+ Origin *Origin
+ Status string
+ IngestionTimestamp int64
+ RawDataLen int
+ // Tags added on processing
+ ProcessingTags []string
+ // Extra information from the parsers
+ ParsingExtra
+ // Extra information for Serverless Logs messages
+ ServerlessExtra
+}
+
+// MessageContent contains the message and possibly the tailer internal representation
+// of every message.
+//
+// To use the MessageContent struct, use `GetContent() []byte` and `SetContent([]byte)`;
+// they make sure of doing the right thing depending on the MessageContent state.
+//
+// MessageContent different states:
+//
+// +-------------------+
+// | StateUnstructured | ------
+// +-------------------+ |
+// |
+// v
+// ( Processor ) +---------------+ ( Processor ) +--------------+
+// ( Renders ) -> | StateRendered | -> ( Encodes ) -> | StateEncoded |
+// ^ +---------------+ +--------------+
+// | |
+// +-------------------+ | v
+// | StateStructured | ------ ( Diagnostic )
+// +-------------------+ (Message Receiver)
+//
+// In `StateUnstructured`, the content in `Content` is the raw log collected by the tailer.
+// In `StateStructured`, `Content` is empty and the log information is in `StructuredContent`.
+// In `StateRendered`, `Content` contains rendered data (from raw/structured logs to something
+// ready to be encoded), the rest should not be used.
+// In `StateEncoded`, `Content` contains the encoded data, the rest should not be used.
+//
+// Note that there is no state distinction between parsed and unparsed content as none was needed
+// for the current implementation, but it is a potential future change with a `StateParsed` state.
+type MessageContent struct { //nolint:revive
+ // unstructured content
+ content []byte
+ // structured content
+ structuredContent StructuredContent
+ State MessageContentState
+}
+
+// MessageContentState is used to represent the MessageContent state.
+type MessageContentState uint32 // nolint:revive
+
+const (
+ // StateUnstructured for unstructured content (e.g. file tailing)
+ StateUnstructured MessageContentState = iota
+ // StateStructured for structured content (e.g. journald tailing, windowsevent tailing)
+ StateStructured
+ // StateRendered means that the MessageContent contains rendered data (i.e. structured content has been rendered)
+ StateRendered
+ // StateEncoded means the MessageContent passed through the encoder (e.g. json encoder, proto encoder, ...)
+ StateEncoded
+)
+
+// GetContent returns the bytes array containing only the message content
+// E.g. from a structured log:
+//
+// Sep 12 14:38:14 user my-app[1316]: time="2023-09-12T14:38:14Z" level=info msg="Starting the main execution"
+//
+// It would only return the `[]byte` containing "Starting the main execution",
+// while for unstructured logs and for sources configured with ProcessRawMessage=true,
+// the whole `[]byte` content is returned.
+// See `MessageContent` comment for more information as this method could also
+// return the message content in different state (rendered, encoded).
+func (m *MessageContent) GetContent() []byte {
+ switch m.State {
+ // for raw, rendered or encoded message, the data has
+ // been written into m.Content
+ case StateUnstructured, StateRendered, StateEncoded:
+ return m.content
+ // when using GetContent() on a structured log, we want
+ // to only return the part containing the content (e.g. for message
+ // processing or for scrubbing)
+ case StateStructured:
+ return m.structuredContent.GetContent()
+ default:
+ log.Error("Unknown state for message on call to SetContent:", m.State)
+ return m.content
+ }
+}
+
+// SetContent stores the given content as the content message.
+// SetContent uses the current message state to know where
+// to store the content.
+func (m *MessageContent) SetContent(content []byte) {
+ switch m.State {
+ case StateStructured:
+ m.structuredContent.SetContent(content)
+ case StateUnstructured, StateRendered, StateEncoded:
+ m.content = content
+ default:
+ log.Error("Unknown state for message on call to SetContent:", m.State)
+ m.content = content
+ }
+}
+
+// SetRendered sets the content for the MessageContent and sets MessageContent state to rendered.
+func (m *MessageContent) SetRendered(content []byte) {
+ m.content = content
+ m.State = StateRendered
+}
+
+// SetEncoded sets the content for the MessageContent and sets MessageContent state to encoded.
+func (m *MessageContent) SetEncoded(content []byte) {
+ m.content = content
+ m.State = StateEncoded
+}
+
+// ParsingExtra ships extra information parsers want to make available
+// to the rest of the pipeline.
+// E.g. Timestamp is used by the docker parsers to transmit a tailing offset.
+type ParsingExtra struct {
+ // Used by docker parsers to transmit an offset.
+ Timestamp string
+ IsPartial bool
+}
+
+// ServerlessExtra ships extra information from logs processing in serverless envs.
+type ServerlessExtra struct {
+ // Optional. Must be UTC. If not provided, time.Now().UTC() will be used
+ // Used in the Serverless Agent
+ Timestamp time.Time
+ // Optional.
+ // Used in the Serverless Agent
+ Lambda *Lambda
+}
+
+// Lambda is a struct storing information about the Lambda function and function execution.
+type Lambda struct {
+ ARN string
+ RequestID string
+}
+
+// NewMessageWithSource constructs an unstructured message
+// with content, status and a log source.
+func NewMessageWithSource(content []byte, status string, source *sources.LogSource, ingestionTimestamp int64) *Message {
+ return NewMessage(content, NewOrigin(source), status, ingestionTimestamp)
+}
+
+// NewMessage constructs an unstructured message with content,
+// status, origin and the ingestion timestamp.
+func NewMessage(content []byte, origin *Origin, status string, ingestionTimestamp int64) *Message {
+ return &Message{
+ MessageContent: MessageContent{
+ content: content,
+ State: StateUnstructured,
+ },
+ Origin: origin,
+ Status: status,
+ IngestionTimestamp: ingestionTimestamp,
+ }
+}
+
+// NewStructuredMessage creates a new message that already had some structure the moment
+// it was captured by a tailer.
+// e.g. a journald message which is a JSON object containing extra information, including
+// the actual message of the entry. We need these objects to be able to apply
+// processing on the message entry only, while we still have to send all
+// the information to the intake.
+func NewStructuredMessage(content StructuredContent, origin *Origin, status string, ingestionTimestamp int64) *Message {
+ return &Message{
+ MessageContent: MessageContent{
+ structuredContent: content,
+ State: StateStructured,
+ },
+ Origin: origin,
+ Status: status,
+ IngestionTimestamp: ingestionTimestamp,
+ }
+}
+
+// Render renders the message.
+// The only state in which this call changes the content is StateStructured.
+func (m *Message) Render() ([]byte, error) {
+ switch m.State {
+ case StateUnstructured:
+ return m.content, nil
+ case StateStructured:
+ data, err := m.MessageContent.structuredContent.Render()
+ if err != nil {
+ return nil, err
+ }
+ return data, nil
+ case StateRendered:
+ return m.content, nil
+ case StateEncoded:
+ return m.content, fmt.Errorf("render call on an encoded message")
+ default:
+ return m.content, fmt.Errorf("unknown message state for rendering")
+ }
+}
+
+// StructuredContent stores enough information from a tailer to manipulate a
+// structured log message (from journald or windowsevents) and to render it to
+// be encoded later on in the pipeline.
+type StructuredContent interface {
+ Render() ([]byte, error)
+ GetContent() []byte
+ SetContent([]byte)
+}
+
+// BasicStructuredContent is used by tailers creating structured logs
+// but with basic needs for transport.
+// The message from the log is stored in the "message" key.
+type BasicStructuredContent struct {
+ Data map[string]interface{}
+}
+
+// Render renders the underlying data as JSON; it is then ready to be
+// encoded and sent to the intake. See the `MessageContent` comment.
+func (m *BasicStructuredContent) Render() ([]byte, error) {
+ return json.Marshal(m.Data)
+}
+
+// GetContent returns the message part of the structured log,
+// in the "message" key of the underlying map.
+func (m *BasicStructuredContent) GetContent() []byte {
+ if value, exists := m.Data["message"]; exists {
+ return []byte(value.(string))
+ }
+ log.Error("BasicStructuredContent not containing any message")
+ return []byte{}
+}
+
+// SetContent stores the message part of the structured log,
+// in the "message" key of the underlying map.
+func (m *BasicStructuredContent) SetContent(content []byte) {
+ // we want to store it typed as a string for the json
+ // marshaling to properly marshal it as a string.
+ m.Data["message"] = string(content)
+}
+
+// NewMessageFromLambda constructs a message with content, status, origin, and
+// the given timestamp and Lambda metadata.
+func NewMessageFromLambda(content []byte, origin *Origin, status string, utcTime time.Time, ARN, reqID string, ingestionTimestamp int64) *Message {
+ return &Message{
+ MessageContent: MessageContent{
+ content: content,
+ State: StateUnstructured,
+ },
+ Origin: origin,
+ Status: status,
+ IngestionTimestamp: ingestionTimestamp,
+ ServerlessExtra: ServerlessExtra{
+ Timestamp: utcTime,
+ Lambda: &Lambda{
+ ARN: ARN,
+ RequestID: reqID,
+ },
+ },
+ }
+}
+
+// GetStatus gets the status of the message.
+// If status is not set, StatusInfo will be returned.
+func (m *Message) GetStatus() string {
+ if m.Status == "" {
+ m.Status = StatusInfo
+ }
+ return m.Status
+}
+
+// GetLatency returns the latency delta from ingestion time until now
+func (m *Message) GetLatency() int64 {
+ return time.Now().UnixNano() - m.IngestionTimestamp
+}
+
+// Tags returns all tags that this message is attached with.
+func (m *Message) Tags() []string {
+ return m.Origin.Tags(m.ProcessingTags)
+}
+
+// TagsToString returns all tags that this message is attached with, as a string.
+func (m *Message) TagsToString() string {
+ return m.Origin.TagsToString(m.ProcessingTags)
+}
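To make the MessageContent state handling above concrete, here is a small sketch (the literal values and the main wrapper are illustrative, not part of this diff) of how GetContent and Render behave for unstructured versus structured messages:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/logs/message"
)

func main() {
	// StateUnstructured: GetContent and Render both return the raw bytes.
	raw := message.NewMessage([]byte("Starting the main execution"), nil, message.StatusInfo, 0)
	fmt.Println(string(raw.GetContent()))

	// StateStructured: GetContent returns only the "message" key, while
	// Render marshals the whole structured payload to JSON.
	structured := message.NewStructuredMessage(
		&message.BasicStructuredContent{Data: map[string]interface{}{
			"message": "Starting the main execution",
			"level":   "info",
		}},
		nil, message.StatusInfo, 0,
	)
	fmt.Println(string(structured.GetContent()))
	if rendered, err := structured.Render(); err == nil {
		fmt.Println(string(rendered)) // {"level":"info","message":"Starting the main execution"}
	}
}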
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/origin.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/origin.go
new file mode 100644
index 0000000000..de4e5f0aa7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/origin.go
@@ -0,0 +1,121 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package message
+
+import (
+ "strings"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/sources"
+)
+
+// Origin represents the Origin of a message
+type Origin struct {
+ Identifier string
+ LogSource *sources.LogSource
+ Offset string
+ service string
+ source string
+ tags []string
+}
+
+// NewOrigin returns a new Origin
+func NewOrigin(source *sources.LogSource) *Origin {
+ return &Origin{
+ LogSource: source,
+ }
+}
+
+// Tags returns the tags of the origin.
+//
+// The returned slice must not be modified by the caller.
+func (o *Origin) Tags(processingTags []string) []string {
+ return o.tagsToStringArray(processingTags)
+}
+
+// TagsPayload returns the raw tag payload of the origin.
+func (o *Origin) TagsPayload(processingTags []string) []byte {
+ var tagsPayload []byte
+
+ source := o.Source()
+ if source != "" {
+ tagsPayload = append(tagsPayload, []byte("[dd ddsource=\""+source+"\"]")...)
+ }
+ sourceCategory := o.LogSource.Config.SourceCategory
+ if sourceCategory != "" {
+ tagsPayload = append(tagsPayload, []byte("[dd ddsourcecategory=\""+sourceCategory+"\"]")...)
+ }
+
+ var tags []string
+ tags = append(tags, o.LogSource.Config.Tags...)
+ tags = append(tags, o.tags...)
+ tags = append(tags, processingTags...)
+
+ if len(tags) > 0 {
+ tagsPayload = append(tagsPayload, []byte("[dd ddtags=\""+strings.Join(tags, ",")+"\"]")...)
+ }
+ if len(tagsPayload) == 0 {
+ tagsPayload = []byte{}
+ }
+ return tagsPayload
+}
+
+// TagsToString encodes tags to a single string, in a comma separated format
+func (o *Origin) TagsToString(processingTags []string) string {
+ tags := o.tagsToStringArray(processingTags)
+
+ if tags == nil {
+ return ""
+ }
+
+ return strings.Join(tags, ",")
+}
+
+func (o *Origin) tagsToStringArray(processingTags []string) []string {
+ tags := o.tags
+
+ sourceCategory := o.LogSource.Config.SourceCategory
+ if sourceCategory != "" {
+ tags = append(tags, "sourcecategory"+":"+sourceCategory)
+ }
+
+ tags = append(tags, o.LogSource.Config.Tags...)
+ tags = append(tags, processingTags...)
+
+ return tags
+}
+
+// SetTags sets the tags of the origin.
+func (o *Origin) SetTags(tags []string) {
+ o.tags = tags
+}
+
+// SetSource sets the source of the origin.
+func (o *Origin) SetSource(source string) {
+ o.source = source
+}
+
+// Source returns the source of the configuration if set, or the source of the message;
+// if neither is defined, it returns an empty string.
+func (o *Origin) Source() string {
+ if o.LogSource.Config.Source != "" {
+ return o.LogSource.Config.Source
+ }
+ return o.source
+}
+
+// SetService sets the service of the origin.
+func (o *Origin) SetService(service string) {
+ o.service = service
+}
+
+// Service returns the service of the configuration if set, or the service of the message;
+// if neither is defined, it returns an empty string.
+func (o *Origin) Service() string {
+ if o.LogSource.Config.Service != "" {
+ return o.LogSource.Config.Service
+ }
+ return o.service
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/status.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/status.go
new file mode 100644
index 0000000000..68704447c0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/message/status.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package message
+
+// Status values
+const (
+ StatusEmergency = "emergency"
+ StatusAlert = "alert"
+ StatusCritical = "critical"
+ StatusError = "error"
+ StatusWarning = "warn"
+ StatusNotice = "notice"
+ StatusInfo = "info"
+ StatusDebug = "debug"
+)
+
+// Syslog severity levels
+var (
+ SevEmergency = []byte("<40>")
+ SevAlert = []byte("<41>")
+ SevCritical = []byte("<42>")
+ SevError = []byte("<43>")
+ SevWarning = []byte("<44>")
+ SevNotice = []byte("<45>")
+ SevInfo = []byte("<46>")
+ SevDebug = []byte("<47>")
+)
+
+// statusSeverityMapping represents the 1:1 mapping between statuses and severities.
+var statusSeverityMapping = map[string][]byte{
+ StatusEmergency: SevEmergency,
+ StatusAlert: SevAlert,
+ StatusCritical: SevCritical,
+ StatusError: SevError,
+ StatusWarning: SevWarning,
+ StatusNotice: SevNotice,
+ StatusInfo: SevInfo,
+ StatusDebug: SevDebug,
+}
+
+// StatusToSeverity transforms a status into a severity.
+func StatusToSeverity(status string) []byte {
+ if sev, exists := statusSeverityMapping[status]; exists {
+ return sev
+ }
+ return SevInfo
+}
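A quick illustration of the status-to-severity lookup above (the main wrapper is illustrative; the fallback behaviour is exactly what StatusToSeverity implements):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/logs/message"
)

func main() {
	fmt.Println(string(message.StatusToSeverity(message.StatusError))) // "<43>"
	// Unknown statuses fall back to the info severity.
	fmt.Println(string(message.StatusToSeverity("not-a-status"))) // "<46>"
}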
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/metrics/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/metrics/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/metrics/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/metrics/metrics.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/metrics/metrics.go
new file mode 100644
index 0000000000..063a4e6afe
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/metrics/metrics.go
@@ -0,0 +1,88 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package metrics
+
+import (
+ "expvar"
+
+ "github.com/DataDog/datadog-agent/pkg/telemetry"
+)
+
+var (
+ // LogsExpvars contains metrics for the logs agent.
+ LogsExpvars *expvar.Map
+ // LogsDecoded is the total number of decoded logs
+ LogsDecoded = expvar.Int{}
+ // TlmLogsDecoded is the total number of decoded logs
+ TlmLogsDecoded = telemetry.NewCounter("logs", "decoded",
+ nil, "Total number of decoded logs")
+ // LogsProcessed is the total number of processed logs.
+ LogsProcessed = expvar.Int{}
+ // TlmLogsProcessed is the total number of processed logs.
+ TlmLogsProcessed = telemetry.NewCounter("logs", "processed",
+ nil, "Total number of processed logs")
+
+ // LogsSent is the total number of sent logs.
+ LogsSent = expvar.Int{}
+ // TlmLogsSent is the total number of sent logs.
+ TlmLogsSent = telemetry.NewCounter("logs", "sent",
+ nil, "Total number of sent logs")
+ // DestinationErrors is the total number of network errors.
+ DestinationErrors = expvar.Int{}
+ // TlmDestinationErrors is the total number of network errors.
+ TlmDestinationErrors = telemetry.NewCounter("logs", "network_errors",
+ nil, "Total number of network errors")
+ // DestinationLogsDropped is the total number of logs dropped per Destination
+ DestinationLogsDropped = expvar.Map{}
+ // TlmLogsDropped is the total number of logs dropped per Destination
+ TlmLogsDropped = telemetry.NewCounter("logs", "dropped",
+ []string{"destination"}, "Total number of logs dropped per Destination")
+ // BytesSent is the total number of sent bytes before encoding if any
+ BytesSent = expvar.Int{}
+ // TlmBytesSent is the total number of sent bytes before encoding if any
+ TlmBytesSent = telemetry.NewCounter("logs", "bytes_sent",
+ nil, "Total number of bytes send before encoding if any")
+ // RetryCount is the total number of times we have retried payloads that failed to send
+ RetryCount = expvar.Int{}
+ // TlmRetryCount is the total number of times we have retried payloads that failed to send
+ TlmRetryCount = telemetry.NewCounter("logs", "retry_count",
+ nil, "Total number of retried paylaods")
+ // RetryTimeSpent is the total time spent retrying payloads that failed to send
+ RetryTimeSpent = expvar.Int{}
+ // EncodedBytesSent is the total number of sent bytes after encoding if any
+ EncodedBytesSent = expvar.Int{}
+ // TlmEncodedBytesSent is the total number of sent bytes after encoding if any
+ TlmEncodedBytesSent = telemetry.NewCounter("logs", "encoded_bytes_sent",
+ nil, "Total number of sent bytes after encoding if any")
+ // SenderLatency is the last reported latency value from the http sender (ms)
+ SenderLatency = expvar.Int{}
+ // TlmSenderLatency is a histogram of http sender latency (ms)
+ TlmSenderLatency = telemetry.NewHistogram("logs", "sender_latency",
+ nil, "Histogram of http sender latency in ms", []float64{10, 25, 50, 75, 100, 250, 500, 1000, 10000})
+ // DestinationExpVars is a map of sender utilization metrics for each http destination
+ DestinationExpVars = expvar.Map{}
+ // TODO: Add LogsCollected for the total number of collected logs.
+ //nolint:revive // TODO(AML) Fix revive linter
+ DestinationHttpRespByStatusAndUrl = expvar.Map{}
+ //nolint:revive // TODO(AML) Fix revive linter
+ TlmDestinationHttpRespByStatusAndUrl = telemetry.NewCounter("logs", "destination_http_resp", []string{"status_code", "url"}, "Count of http responses by status code and destination url")
+)
+
+func init() {
+ LogsExpvars = expvar.NewMap("logs-agent")
+ LogsExpvars.Set("LogsDecoded", &LogsDecoded)
+ LogsExpvars.Set("LogsProcessed", &LogsProcessed)
+ LogsExpvars.Set("LogsSent", &LogsSent)
+ LogsExpvars.Set("DestinationErrors", &DestinationErrors)
+ LogsExpvars.Set("DestinationLogsDropped", &DestinationLogsDropped)
+ LogsExpvars.Set("BytesSent", &BytesSent)
+ LogsExpvars.Set("RetryCount", &RetryCount)
+ LogsExpvars.Set("RetryTimeSpent", &RetryTimeSpent)
+ LogsExpvars.Set("EncodedBytesSent", &EncodedBytesSent)
+ LogsExpvars.Set("SenderLatency", &SenderLatency)
+ LogsExpvars.Set("HttpDestinationStats", &DestinationExpVars)
+}
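Because init() registers the expvar counters above in the "logs-agent" expvar map, bumping one of them is enough for it to appear in the published stats. A minimal sketch (the choice of counters and the main wrapper are illustrative):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/logs/metrics"
)

func main() {
	// Increment two of the expvar counters registered in init().
	metrics.LogsProcessed.Add(1)
	metrics.LogsSent.Add(1)

	// The "logs-agent" expvar map now reflects the new values.
	fmt.Println(metrics.LogsExpvars.String())
}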
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/pipeline/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/pipeline/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/pipeline/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/pipeline/pipeline.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/pipeline/pipeline.go
new file mode 100644
index 0000000000..5c0bffc8ee
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/pipeline/pipeline.go
@@ -0,0 +1,136 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package pipeline
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface"
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
+ "github.com/DataDog/datadog-agent/pkg/logs/client/http"
+ "github.com/DataDog/datadog-agent/pkg/logs/client/tcp"
+ "github.com/DataDog/datadog-agent/pkg/logs/diagnostic"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/processor"
+ "github.com/DataDog/datadog-agent/pkg/logs/sender"
+ "github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
+)
+
+// Pipeline processes and sends messages to the backend
+type Pipeline struct {
+ InputChan chan *message.Message
+ flushChan chan struct{}
+ processor *processor.Processor
+ strategy sender.Strategy
+ sender *sender.Sender
+}
+
+// NewPipeline returns a new Pipeline
+func NewPipeline(outputChan chan *message.Payload,
+ processingRules []*config.ProcessingRule,
+ endpoints *config.Endpoints,
+ destinationsContext *client.DestinationsContext,
+ diagnosticMessageReceiver diagnostic.MessageReceiver,
+ serverless bool,
+ pipelineID int,
+ status statusinterface.Status,
+ hostname hostnameinterface.Component,
+ cfg pkgconfigmodel.Reader) *Pipeline {
+
+ mainDestinations := getDestinations(endpoints, destinationsContext, pipelineID, serverless, status, cfg)
+
+ strategyInput := make(chan *message.Message, config.ChanSize)
+ senderInput := make(chan *message.Payload, 1) // Only buffer 1 message since payloads can be large
+ flushChan := make(chan struct{})
+
+ var logsSender *sender.Sender
+
+ var encoder processor.Encoder
+ if serverless {
+ encoder = processor.JSONServerlessEncoder
+ } else if endpoints.UseHTTP {
+ encoder = processor.JSONEncoder
+ } else if endpoints.UseProto {
+ encoder = processor.ProtoEncoder
+ } else {
+ encoder = processor.RawEncoder
+ }
+
+ strategy := getStrategy(strategyInput, senderInput, flushChan, endpoints, serverless, pipelineID)
+ logsSender = sender.NewSender(cfg, senderInput, outputChan, mainDestinations, config.DestinationPayloadChanSize)
+
+ inputChan := make(chan *message.Message, config.ChanSize)
+ processor := processor.New(inputChan, strategyInput, processingRules, encoder, diagnosticMessageReceiver, hostname, pipelineID)
+
+ return &Pipeline{
+ InputChan: inputChan,
+ flushChan: flushChan,
+ processor: processor,
+ strategy: strategy,
+ sender: logsSender,
+ }
+}
+
+// Start launches the pipeline
+func (p *Pipeline) Start() {
+ p.sender.Start()
+ p.strategy.Start()
+ p.processor.Start()
+}
+
+// Stop stops the pipeline
+func (p *Pipeline) Stop() {
+ p.processor.Stop()
+ p.strategy.Stop()
+ p.sender.Stop()
+}
+
+// Flush synchronously flushes the processor and sender managed by this pipeline.
+func (p *Pipeline) Flush(ctx context.Context) {
+ p.flushChan <- struct{}{}
+ p.processor.Flush(ctx) // flush messages in the processor into the sender
+}
+
+func getDestinations(endpoints *config.Endpoints, destinationsContext *client.DestinationsContext, pipelineID int, serverless bool, status statusinterface.Status, cfg pkgconfigmodel.Reader) *client.Destinations {
+ reliable := []client.Destination{}
+ additionals := []client.Destination{}
+
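+ // Each configured endpoint becomes its own destination: endpoints returned by
+ // GetReliableEndpoints are treated as reliable, the remaining ones as
+ // additional, best-effort destinations.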
+ if endpoints.UseHTTP {
+ for i, endpoint := range endpoints.GetReliableEndpoints() {
+ telemetryName := fmt.Sprintf("logs_%d_reliable_%d", pipelineID, i)
+ reliable = append(reliable, http.NewDestination(endpoint, http.JSONContentType, destinationsContext, endpoints.BatchMaxConcurrentSend, !serverless, telemetryName, cfg))
+ }
+ for i, endpoint := range endpoints.GetUnReliableEndpoints() {
+ telemetryName := fmt.Sprintf("logs_%d_unreliable_%d", pipelineID, i)
+ additionals = append(additionals, http.NewDestination(endpoint, http.JSONContentType, destinationsContext, endpoints.BatchMaxConcurrentSend, false, telemetryName, cfg))
+ }
+ return client.NewDestinations(reliable, additionals)
+ }
+ for _, endpoint := range endpoints.GetReliableEndpoints() {
+ reliable = append(reliable, tcp.NewDestination(endpoint, endpoints.UseProto, destinationsContext, !serverless, status))
+ }
+ for _, endpoint := range endpoints.GetUnReliableEndpoints() {
+ additionals = append(additionals, tcp.NewDestination(endpoint, endpoints.UseProto, destinationsContext, false, status))
+ }
+
+ return client.NewDestinations(reliable, additionals)
+}
+
+//nolint:revive // TODO(AML) Fix revive linter
+func getStrategy(inputChan chan *message.Message, outputChan chan *message.Payload, flushChan chan struct{}, endpoints *config.Endpoints, serverless bool, pipelineID int) sender.Strategy {
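+ // HTTP (and serverless) pipelines batch messages into array payloads, optionally
+ // gzip-compressed, while TCP pipelines stream messages one at a time.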
+ if endpoints.UseHTTP || serverless {
+ encoder := sender.IdentityContentType
+ if endpoints.Main.UseCompression {
+ encoder = sender.NewGzipContentEncoding(endpoints.Main.CompressionLevel)
+ }
+ return sender.NewBatchStrategy(inputChan, outputChan, flushChan, sender.ArraySerializer, endpoints.BatchWait, endpoints.BatchMaxSize, endpoints.BatchMaxContentSize, "logs", encoder)
+ }
+ return sender.NewStreamStrategy(inputChan, outputChan, sender.IdentityContentType)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/pipeline/provider.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/pipeline/provider.go
new file mode 100644
index 0000000000..5554843aa3
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/pipeline/provider.go
@@ -0,0 +1,177 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package pipeline
+
+import (
+ "context"
+
+ "github.com/hashicorp/go-multierror"
+ "go.uber.org/atomic"
+
+ "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface"
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/logs/auditor"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
+ "github.com/DataDog/datadog-agent/pkg/logs/diagnostic"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/sds"
+ "github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/DataDog/datadog-agent/pkg/util/startstop"
+)
+
+// Provider provides message channels
+type Provider interface {
+ Start()
+ Stop()
+ ReconfigureSDSStandardRules(standardRules []byte) error
+ ReconfigureSDSAgentConfig(config []byte) error
+ NextPipelineChan() chan *message.Message
+ // Flush flushes all pipelines contained in this Provider
+ Flush(ctx context.Context)
+}
+
+// provider implements providing logic
+type provider struct {
+ numberOfPipelines int
+ auditor auditor.Auditor
+ diagnosticMessageReceiver diagnostic.MessageReceiver
+ outputChan chan *message.Payload
+ processingRules []*config.ProcessingRule
+ endpoints *config.Endpoints
+
+ pipelines []*Pipeline
+ currentPipelineIndex *atomic.Uint32
+ destinationsContext *client.DestinationsContext
+
+ serverless bool
+
+ status statusinterface.Status
+ hostname hostnameinterface.Component
+ cfg pkgconfigmodel.Reader
+}
+
+// NewProvider returns a new Provider
+func NewProvider(numberOfPipelines int, auditor auditor.Auditor, diagnosticMessageReceiver diagnostic.MessageReceiver, processingRules []*config.ProcessingRule, endpoints *config.Endpoints, destinationsContext *client.DestinationsContext, status statusinterface.Status, hostname hostnameinterface.Component, cfg pkgconfigmodel.Reader) Provider {
+ return newProvider(numberOfPipelines, auditor, diagnosticMessageReceiver, processingRules, endpoints, destinationsContext, false, status, hostname, cfg)
+}
+
+// NewServerlessProvider returns a new Provider in serverless mode
+func NewServerlessProvider(numberOfPipelines int, auditor auditor.Auditor, processingRules []*config.ProcessingRule, endpoints *config.Endpoints, destinationsContext *client.DestinationsContext, status statusinterface.Status, hostname hostnameinterface.Component, cfg pkgconfigmodel.Reader) Provider {
+ return newProvider(numberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, processingRules, endpoints, destinationsContext, true, status, hostname, cfg)
+}
+
+// NewMockProvider creates a new provider that will not provide any pipelines.
+func NewMockProvider() Provider {
+ return &provider{}
+}
+
+func newProvider(numberOfPipelines int, auditor auditor.Auditor, diagnosticMessageReceiver diagnostic.MessageReceiver, processingRules []*config.ProcessingRule, endpoints *config.Endpoints, destinationsContext *client.DestinationsContext, serverless bool, status statusinterface.Status, hostname hostnameinterface.Component, cfg pkgconfigmodel.Reader) Provider {
+ return &provider{
+ numberOfPipelines: numberOfPipelines,
+ auditor: auditor,
+ diagnosticMessageReceiver: diagnosticMessageReceiver,
+ processingRules: processingRules,
+ endpoints: endpoints,
+ pipelines: []*Pipeline{},
+ currentPipelineIndex: atomic.NewUint32(0),
+ destinationsContext: destinationsContext,
+ serverless: serverless,
+ status: status,
+ hostname: hostname,
+ cfg: cfg,
+ }
+}
+
+// Start initializes the pipelines
+func (p *provider) Start() {
+ // This requires the auditor to be started before.
+ p.outputChan = p.auditor.Channel()
+
+ for i := 0; i < p.numberOfPipelines; i++ {
+ pipeline := NewPipeline(p.outputChan, p.processingRules, p.endpoints, p.destinationsContext, p.diagnosticMessageReceiver, p.serverless, i, p.status, p.hostname, p.cfg)
+ pipeline.Start()
+ p.pipelines = append(p.pipelines, pipeline)
+ }
+}
+
+// Stop stops all pipelines in parallel,
+// this call blocks until all pipelines are stopped
+func (p *provider) Stop() {
+ stopper := startstop.NewParallelStopper()
+ for _, pipeline := range p.pipelines {
+ stopper.Add(pipeline)
+ }
+ stopper.Stop()
+ p.pipelines = p.pipelines[:0]
+ p.outputChan = nil
+}
+
+func (p *provider) reconfigureSDS(config []byte, orderType sds.ReconfigureOrderType) error {
+ var responses []chan error
+
+ // send a reconfiguration order to every running pipeline
+
+ for _, pipeline := range p.pipelines {
+ order := sds.ReconfigureOrder{
+ Type: orderType,
+ Config: config,
+ ResponseChan: make(chan error),
+ }
+ responses = append(responses, order.ResponseChan)
+
+ log.Debug("Sending SDS reconfiguration order:", string(order.Type))
+ pipeline.processor.ReconfigChan <- order
+ }
+
+ // reports if at least one error occurred
+
+ var rerr error
+ for _, response := range responses {
+ err := <-response
+ if err != nil {
+ rerr = multierror.Append(rerr, err)
+ }
+ close(response)
+ }
+
+ return rerr
+}
+
+// ReconfigureSDSStandardRules stores the SDS standard rules for the given provider.
+func (p *provider) ReconfigureSDSStandardRules(standardRules []byte) error {
+ return p.reconfigureSDS(standardRules, sds.StandardRules)
+}
+
+// ReconfigureSDSAgentConfig reconfigures the pipeline with the given
+// configuration received through Remote Configuration.
+func (p *provider) ReconfigureSDSAgentConfig(config []byte) error {
+ return p.reconfigureSDS(config, sds.AgentConfig)
+}
+
+// NextPipelineChan returns the next pipeline input channel
+func (p *provider) NextPipelineChan() chan *message.Message {
+ pipelinesLen := len(p.pipelines)
+ if pipelinesLen == 0 {
+ return nil
+ }
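+ // Round-robin across pipelines: the atomic counter lets concurrent callers
+ // pick successive pipelines without extra locking.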
+ index := p.currentPipelineIndex.Inc() % uint32(pipelinesLen)
+ nextPipeline := p.pipelines[index]
+ return nextPipeline.InputChan
+}
+
+// Flush synchronously flushes all the pipelines contained in this provider.
+func (p *provider) Flush(ctx context.Context) {
+ for _, p := range p.pipelines {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ p.Flush(ctx)
+ }
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/encoder.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/encoder.go
new file mode 100644
index 0000000000..e3af636d6d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/encoder.go
@@ -0,0 +1,36 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package processor
+
+import (
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+// Encoder turns a message into a raw byte array ready to be sent.
+type Encoder interface {
+ Encode(msg *message.Message, hostname string) error
+}
+
+// toValidUtf8 returns msg as a string, replacing any invalid UTF-8 sequence
+// with the Unicode replacement character.
+func toValidUtf8(msg []byte) string {
+ if utf8.Valid(msg) {
+ return string(msg)
+ }
+ str := make([]rune, 0, len(msg))
+ // Decode rune by rune, advancing by the decoded size so multi-byte runes
+ // are kept intact and only truly invalid bytes are replaced.
+ for i := 0; i < len(msg); {
+ r, size := utf8.DecodeRune(msg[i:])
+ if r == utf8.RuneError && size == 1 {
+ str = append(str, unicode.ReplacementChar)
+ } else {
+ str = append(str, r)
+ }
+ i += size
+ }
+ return string(str)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/json.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/json.go
new file mode 100644
index 0000000000..59d9503d93
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/json.go
@@ -0,0 +1,62 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package processor
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
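+// nanoToMillis is the divisor used to reduce a nanosecond timestamp to the
+// millisecond resolution carried in the JSON payload.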
+const nanoToMillis = 1000000
+
+// JSONEncoder is a shared json encoder.
+var JSONEncoder Encoder = &jsonEncoder{}
+
+// jsonEncoder transforms a message into a JSON byte array.
+type jsonEncoder struct{}
+
+// JSON representation of a message.
+type jsonPayload struct {
+ Message string `json:"message"`
+ Status string `json:"status"`
+ Timestamp int64 `json:"timestamp"`
+ Hostname string `json:"hostname"`
+ Service string `json:"service"`
+ Source string `json:"ddsource"`
+ Tags string `json:"ddtags"`
+}
+
+// Encode encodes a message into a JSON byte array.
+func (j *jsonEncoder) Encode(msg *message.Message, hostname string) error {
+ if msg.State != message.StateRendered {
+ return fmt.Errorf("message passed to encoder isn't rendered")
+ }
+
+ ts := time.Now().UTC()
+ if !msg.ServerlessExtra.Timestamp.IsZero() {
+ ts = msg.ServerlessExtra.Timestamp
+ }
+
+ encoded, err := json.Marshal(jsonPayload{
+ Message: toValidUtf8(msg.GetContent()),
+ Status: msg.GetStatus(),
+ Timestamp: ts.UnixNano() / nanoToMillis,
+ Hostname: hostname,
+ Service: msg.Origin.Service(),
+ Source: msg.Origin.Source(),
+ Tags: msg.TagsToString(),
+ })
+
+ if err != nil {
+ return fmt.Errorf("can't encode the message: %v", err)
+ }
+
+ msg.SetEncoded(encoded)
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/json_serverless.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/json_serverless.go
new file mode 100644
index 0000000000..041da711c7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/json_serverless.go
@@ -0,0 +1,84 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package processor
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+// JSONServerlessEncoder is a shared json encoder sending a struct message field
+// instead of a bytes message field. This encoder is used in the AWS Lambda
+// serverless environment.
+var JSONServerlessEncoder Encoder = &jsonServerlessEncoder{}
+
+// jsonServerlessEncoder transforms a message into a JSON byte array.
+type jsonServerlessEncoder struct{}
+
+// JSON representation of a message.
+type jsonServerlessPayload struct {
+ Message jsonServerlessMessage `json:"message"`
+ Status string `json:"status"`
+ Timestamp int64 `json:"timestamp"`
+ Hostname string `json:"hostname"`
+ Service string `json:"service,omitempty"`
+ Source string `json:"ddsource"`
+ Tags string `json:"ddtags"`
+}
+
+type jsonServerlessMessage struct {
+ Message string `json:"message"`
+ Lambda *jsonServerlessLambda `json:"lambda,omitempty"`
+}
+
+type jsonServerlessLambda struct {
+ ARN string `json:"arn"`
+ RequestID string `json:"request_id,omitempty"`
+}
+
+// Encode encodes a message into a JSON byte array.
+func (j *jsonServerlessEncoder) Encode(msg *message.Message, hostname string) error {
+ if msg.State != message.StateRendered {
+ return fmt.Errorf("message passed to encoder isn't rendered")
+ }
+
+ ts := time.Now().UTC()
+ if !msg.ServerlessExtra.Timestamp.IsZero() {
+ ts = msg.ServerlessExtra.Timestamp
+ }
+
+ // add lambda metadata
+ var lambdaPart *jsonServerlessLambda
+ if l := msg.ServerlessExtra.Lambda; l != nil {
+ lambdaPart = &jsonServerlessLambda{
+ ARN: l.ARN,
+ RequestID: l.RequestID,
+ }
+ }
+
+ encoded, err := json.Marshal(jsonServerlessPayload{
+ Message: jsonServerlessMessage{
+ Message: toValidUtf8(msg.GetContent()),
+ Lambda: lambdaPart,
+ },
+ Status: msg.GetStatus(),
+ Timestamp: ts.UnixNano() / nanoToMillis,
+ Hostname: hostname,
+ Service: msg.Origin.Service(),
+ Source: msg.Origin.Source(),
+ Tags: msg.TagsToString(),
+ })
+
+ if err != nil {
+ return fmt.Errorf("can't encode the message: %v", err)
+ }
+
+ msg.SetEncoded(encoded)
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/processor.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/processor.go
new file mode 100644
index 0000000000..707e1874cd
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/processor.go
@@ -0,0 +1,220 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package processor
+
+import (
+ "context"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface"
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ "github.com/DataDog/datadog-agent/pkg/logs/diagnostic"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/logs/metrics"
+ "github.com/DataDog/datadog-agent/pkg/logs/sds"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// UnstructuredProcessingMetricName collects how many rules are used on unstructured
+// content for tailers capable of processing both unstructured and structured content.
+const UnstructuredProcessingMetricName = "datadog.logs_agent.tailer.unstructured_processing"
+
+// A Processor updates messages from an inputChan and pushes
+// them into an outputChan.
+type Processor struct {
+ pipelineID int
+ inputChan chan *message.Message
+ outputChan chan *message.Message // strategy input
+ // ReconfigChan transports rules to use in order to reconfigure
+ // the processing rules of the SDS Scanner.
+ ReconfigChan chan sds.ReconfigureOrder
+ processingRules []*config.ProcessingRule
+ encoder Encoder
+ done chan struct{}
+ diagnosticMessageReceiver diagnostic.MessageReceiver
+ mu sync.Mutex
+ hostname hostnameinterface.Component
+
+ sds *sds.Scanner // configured through RC
+}
+
+// New returns an initialized Processor.
+func New(inputChan, outputChan chan *message.Message, processingRules []*config.ProcessingRule, encoder Encoder,
+ diagnosticMessageReceiver diagnostic.MessageReceiver, hostname hostnameinterface.Component, pipelineID int) *Processor {
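+ // The per-pipeline SDS scanner starts without any rules; it is only applied to
+ // messages once it has been configured through an order received on ReconfigChan.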
+ sdsScanner := sds.CreateScanner(pipelineID)
+
+ return &Processor{
+ pipelineID: pipelineID,
+ inputChan: inputChan,
+ outputChan: outputChan, // strategy input
+ ReconfigChan: make(chan sds.ReconfigureOrder),
+ processingRules: processingRules,
+ encoder: encoder,
+ done: make(chan struct{}),
+ sds: sdsScanner,
+ diagnosticMessageReceiver: diagnosticMessageReceiver,
+ hostname: hostname,
+ }
+}
+
+// Start starts the Processor.
+func (p *Processor) Start() {
+ go p.run()
+}
+
+// Stop stops the Processor,
+// this call blocks until inputChan is flushed
+func (p *Processor) Stop() {
+ close(p.inputChan)
+ <-p.done
+ // once the processor mainloop is not running, it's safe
+ // to delete the sds scanner instance.
+ if p.sds != nil {
+ p.sds.Delete()
+ p.sds = nil
+ }
+}
+
+// Flush synchronously processes the messages still buffered in this processor's input channel.
+func (p *Processor) Flush(ctx context.Context) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
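+ // Holding the lock pauses the run() loop between messages, so the remaining
+ // buffered input can be drained synchronously here.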
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ if len(p.inputChan) == 0 {
+ return
+ }
+ msg := <-p.inputChan
+ p.processMessage(msg)
+ }
+ }
+}
+
+// run starts the processing of the inputChan
+func (p *Processor) run() {
+ defer func() {
+ p.done <- struct{}{}
+ }()
+
+ for {
+ select {
+ case msg, ok := <-p.inputChan:
+ if !ok { // channel has been closed
+ return
+ }
+ p.processMessage(msg)
+ p.mu.Lock() // block here if we're trying to flush synchronously
+ //nolint:staticcheck
+ p.mu.Unlock()
+ case order := <-p.ReconfigChan:
+ p.mu.Lock()
+ if err := p.sds.Reconfigure(order); err != nil {
+ log.Errorf("Error while reconfiguring the SDS scanner: %v", err)
+ order.ResponseChan <- err
+ } else {
+ order.ResponseChan <- nil
+ }
+ p.mu.Unlock()
+ }
+ }
+}
+
+func (p *Processor) processMessage(msg *message.Message) {
+ metrics.LogsDecoded.Add(1)
+ metrics.TlmLogsDecoded.Inc()
+
+ if toSend := p.applyRedactingRules(msg); toSend {
+ metrics.LogsProcessed.Add(1)
+ metrics.TlmLogsProcessed.Inc()
+
+ // render the message
+ rendered, err := msg.Render()
+ if err != nil {
+ log.Error("can't render the msg", err)
+ return
+ }
+ msg.SetRendered(rendered)
+
+ // report this message to diagnostic receivers (e.g. `stream-logs` command)
+ p.diagnosticMessageReceiver.HandleMessage(msg, rendered, "")
+
+ // encode the message to its final format, it is done in-place
+ if err := p.encoder.Encode(msg, p.GetHostname(msg)); err != nil {
+ log.Error("unable to encode msg ", err)
+ return
+ }
+
+ p.outputChan <- msg
+ }
+}
+
+// applyRedactingRules reports whether the given message should be processed further;
+// any redaction is applied directly to the message content.
+func (p *Processor) applyRedactingRules(msg *message.Message) bool {
+ var content []byte = msg.GetContent()
+
+ // Use the internal scrubbing implementation of the Agent
+ // ---------------------------
+
+ rules := append(p.processingRules, msg.Origin.LogSource.Config.ProcessingRules...)
+ for _, rule := range rules {
+ switch rule.Type {
+ case config.ExcludeAtMatch:
+ // if this message matches, we ignore it
+ if rule.Regex.Match(content) {
+ return false
+ }
+ case config.IncludeAtMatch:
+ // if this message doesn't match, we ignore it
+ if !rule.Regex.Match(content) {
+ return false
+ }
+ case config.MaskSequences:
+ content = rule.Regex.ReplaceAll(content, rule.Placeholder)
+ }
+ }
+
+ // Use the SDS implementation
+ // --------------------------
+
+ // Global SDS scanner, applied on all log sources
+ if p.sds.IsReady() {
+ mutated, evtProcessed, err := p.sds.Scan(content, msg)
+ if err != nil {
+ log.Error("while using SDS to scan the log:", err)
+ } else if mutated {
+ content = evtProcessed
+ }
+ }
+
+ msg.SetContent(content)
+ return true // we want to send this message
+}
+
+// GetHostname returns the hostname to apply to the given log message
+func (p *Processor) GetHostname(msg *message.Message) string {
+ if msg.Hostname != "" {
+ return msg.Hostname
+ }
+
+ if msg.Lambda != nil {
+ return msg.Lambda.ARN
+ }
+
+ if p.hostname == nil {
+ return "unknown"
+ }
+ hname, err := p.hostname.Get(context.TODO())
+ if err != nil {
+ // this scenario is not likely to happen since
+ // the agent cannot start without a hostname
+ hname = "unknown"
+ }
+ return hname
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/proto.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/proto.go
new file mode 100644
index 0000000000..573dca393d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/proto.go
@@ -0,0 +1,45 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package processor
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/DataDog/agent-payload/v5/pb"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+// ProtoEncoder is a shared proto encoder.
+var ProtoEncoder Encoder = &protoEncoder{}
+
+// protoEncoder transforms a message into a protobuf byte array.
+type protoEncoder struct{}
+
+// Encode encodes a message into a protobuf byte array.
+func (p *protoEncoder) Encode(msg *message.Message, hostname string) error {
+ if msg.State != message.StateRendered {
+ return fmt.Errorf("message passed to encoder isn't rendered")
+ }
+
+ log := &pb.Log{
+ Message: toValidUtf8(msg.GetContent()),
+ Status: msg.GetStatus(),
+ Timestamp: time.Now().UTC().UnixNano(),
+ Hostname: hostname,
+ Service: msg.Origin.Service(),
+ Source: msg.Origin.Source(),
+ Tags: msg.Tags(),
+ }
+ encoded, err := log.Marshal()
+
+ if err != nil {
+ return fmt.Errorf("can't encode the message: %v", err)
+ }
+
+ msg.SetEncoded(encoded)
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/raw.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/raw.go
new file mode 100644
index 0000000000..ed4303d4b2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/processor/raw.go
@@ -0,0 +1,91 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package processor
+
+import (
+ "fmt"
+ "regexp"
+ "time"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+// RawEncoder is a shared raw encoder.
+var RawEncoder Encoder = &rawEncoder{}
+
+type rawEncoder struct{}
+
+func (r *rawEncoder) Encode(msg *message.Message, hostname string) error {
+ rendered, err := msg.Render()
+ if err != nil {
+ return fmt.Errorf("can't render the message: %v", err)
+ }
+
+ // if the first char is '<', we can assume it's already formatted as RFC5424, thus skip this step
+ // (for instance, using tcp forwarding. We don't want to override the hostname & co)
+ if len(rendered) > 0 && !isRFC5424Formatted(rendered) {
+ // fit RFC5424
+ // <%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %$!new-appname% - - - %msg%\n
+ extraContent := []byte("")
+
+ // Severity
+ extraContent = append(extraContent, message.StatusToSeverity(msg.GetStatus())...)
+
+ // Protocol version
+ extraContent = append(extraContent, '0')
+ extraContent = append(extraContent, ' ')
+
+ // Timestamp
+ extraContent = time.Now().UTC().AppendFormat(extraContent, config.DateFormat)
+ extraContent = append(extraContent, ' ')
+
+ extraContent = append(extraContent, []byte(hostname)...)
+ extraContent = append(extraContent, ' ')
+
+ // Service
+ service := msg.Origin.Service()
+ if service != "" {
+ extraContent = append(extraContent, []byte(service)...)
+ } else {
+ extraContent = append(extraContent, '-')
+ }
+
+ // Extra
+ extraContent = append(extraContent, []byte(" - - ")...)
+
+ // Tags
+ tagsPayload := msg.Origin.TagsPayload(msg.ProcessingTags)
+ if len(tagsPayload) > 0 {
+ extraContent = append(extraContent, tagsPayload...)
+ } else {
+ extraContent = append(extraContent, '-')
+ }
+ extraContent = append(extraContent, ' ')
+
+ extraContent = append(extraContent, rendered...)
+
+ msg.SetEncoded(extraContent)
+ }
+
+ // in this situation, we don't want to re-encode the data, just
+ // make sure its state is correct.
+ msg.State = message.StateEncoded
+ return nil
+}
+
+var rfc5424Pattern, _ = regexp.Compile("<[0-9]{1,3}>[0-9] ")
+
+func isRFC5424Formatted(content []byte) bool {
+ // RFC5424 formatted messages start with `<%pri%>%protocol-version% `
+ // pri is 1 to 3 digits, protocol-version is one digit (won't realistically
+ // be more before we kill this custom code)
+ // As a result, the start is between 5 and 7 chars.
+ if len(content) < 8 { // even if the start could be only 5 chars, RFC5424 must have other chars like `-`
+ return false
+ }
+ return rfc5424Pattern.Match(content[:8])
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/reconfigure.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/reconfigure.go
new file mode 100644
index 0000000000..5b3d3fe4c6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/reconfigure.go
@@ -0,0 +1,26 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive
+package sds
+
+type ReconfigureOrderType string
+
+const (
+ // StandardRules triggers the storage of a new set of standard rules
+ // and reconfigures the internal SDS scanner with an existing user
+ // configuration, if any.
+ StandardRules ReconfigureOrderType = "standard_rules"
+ // AgentConfig triggers a reconfiguration of the SDS scanner.
+ AgentConfig ReconfigureOrderType = "agent_config"
+)
+
+// ReconfigureOrder are used to trigger a reconfiguration
+// of the SDS scanner.
+type ReconfigureOrder struct {
+ Type ReconfigureOrderType
+ Config []byte
+ ResponseChan chan error
+}
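
The order types above are the whole contract between the configuration side and the scanner: a caller builds a ReconfigureOrder and hands it to a Scanner (defined in scanner.go below). A minimal sketch of that plumbing, with an illustrative order channel and an invented user-configuration payload; error handling is reduced to a log call:

package main

import (
	"log"

	"github.com/DataDog/datadog-agent/pkg/logs/sds"
)

func main() {
	scanner := sds.CreateScanner(0)

	// A dispatcher owns the order channel; whoever produced the order waits on
	// ResponseChan for the outcome of the reconfiguration.
	orders := make(chan sds.ReconfigureOrder)
	go func() {
		for o := range orders {
			o.ResponseChan <- scanner.Reconfigure(o)
		}
	}()

	resp := make(chan error)
	orders <- sds.ReconfigureOrder{
		Type:         sds.AgentConfig,
		Config:       []byte(`{"id":"g1","name":"demo group","is_enabled":true,"rules":[]}`),
		ResponseChan: resp,
	}
	if err := <-resp; err != nil {
		log.Println("SDS reconfiguration failed:", err)
	}
}
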
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/rules.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/rules.go
new file mode 100644
index 0000000000..a5e32fe903
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/rules.go
@@ -0,0 +1,97 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive
+package sds
+
+// RulesConfig as sent by the Remote Configuration.
+// Equivalent of the groups in the UI.
+type RulesConfig struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Rules []RuleConfig `json:"rules"`
+ IsEnabled bool `json:"is_enabled"`
+ Description string `json:"description"`
+}
+
+ // MatchAction defines the action to take when there is a match.
+type MatchAction struct {
+ Type string `json:"type"`
+ Placeholder string `json:"placeholder"`
+ Direction string `json:"direction"`
+ CharacterCount uint32 `json:"character_count"`
+}
+
+ // StandardRuleConfig as sent by the Remote Configuration.
+type StandardRuleConfig struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Tags []string `json:"tags"`
+ Description string `json:"description"`
+ Definitions []StandardRuleDefinition `json:"definitions"`
+}
+
+// StandardRuleDefinition contains a versioned standard rule definition.
+type StandardRuleDefinition struct {
+ Version int `json:"version"`
+ Pattern string `json:"pattern"`
+ DefaultIncludedKeywords []string `json:"default_included_keywords"`
+ RequiredCapabilities []string `json:"required_capabilities"`
+}
+
+// StandardRulesConfig contains standard rules.
+type StandardRulesConfig struct {
+ Rules []StandardRuleConfig `json:"rules"`
+ Defaults StandardRulesDefaults `json:"defaults"`
+}
+
+ // StandardRulesDefaults contains default constants for
+ // standard rules.
+type StandardRulesDefaults struct {
+ IncludedKeywordsCharCount uint32 `json:"included_keywords_char_count"`
+}
+
+// RuleConfig of rule as sent by the Remote Configuration.
+type RuleConfig struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Tags []string `json:"tags"`
+ Definition RuleDefinition `json:"definition"`
+ MatchAction MatchAction `json:"match_action"`
+ IncludedKeywords ProximityKeywords `json:"included_keywords"`
+ IsEnabled bool `json:"is_enabled"`
+}
+
+// ProximityKeywords definition in RC config.
+type ProximityKeywords struct {
+ Keywords []string `json:"keywords"`
+ CharacterCount uint32 `json:"character_count"`
+}
+
+// RuleDefinition definition in RC config.
+type RuleDefinition struct {
+ StandardRuleID string `json:"standard_rule_id"`
+ Pattern string `json:"pattern"`
+}
+
+// OnlyEnabled returns a new RulesConfig object containing only enabled rules.
+// Use this to filter out disabled rules.
+func (r RulesConfig) OnlyEnabled() RulesConfig {
+ // is the whole group disabled?
+ if !r.IsEnabled {
+ return RulesConfig{Rules: []RuleConfig{}}
+ }
+
+ rules := []RuleConfig{}
+ for _, rule := range r.Rules {
+ if rule.IsEnabled {
+ rules = append(rules, rule)
+ }
+ }
+ return RulesConfig{
+ Rules: rules,
+ }
+}
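
Taken together, these types mirror the JSON that Remote Configuration delivers for a rule group. A rough sketch of decoding such a payload and applying OnlyEnabled, using only this package plus encoding/json; the field values are invented for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/logs/sds"
)

func main() {
	raw := []byte(`{
	  "id": "group-1",
	  "name": "demo group",
	  "is_enabled": true,
	  "rules": [
	    {"id": "r1", "name": "redact emails", "is_enabled": true,
	     "definition": {"standard_rule_id": "std-email"},
	     "match_action": {"type": "redact", "placeholder": "[EMAIL]"}},
	    {"id": "r2", "name": "disabled rule", "is_enabled": false,
	     "definition": {"pattern": "secret-[0-9]+"},
	     "match_action": {"type": "none"}}
	  ]
	}`)

	var group sds.RulesConfig
	if err := json.Unmarshal(raw, &group); err != nil {
		panic(err)
	}

	enabled := group.OnlyEnabled()
	fmt.Println(len(enabled.Rules)) // 1: only "redact emails" survives the filter
}
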
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/scanner.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/scanner.go
new file mode 100644
index 0000000000..effde9e28c
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/scanner.go
@@ -0,0 +1,420 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build sds
+
+//nolint:revive
+package sds
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ sds "github.com/DataDog/dd-sensitive-data-scanner/sds-go/go"
+)
+
+const ScannedTag = "sds_agent:true"
+
+const SDSEnabled = true
+
+var (
+ tlmSDSRulesState = telemetry.NewGaugeWithOpts("sds", "rules", []string{"pipeline", "state"},
+ "Rules state.", telemetry.Options{DefaultMetric: true})
+ tlmSDSReconfigError = telemetry.NewCounterWithOpts("sds", "reconfiguration_error", []string{"pipeline", "type", "error_type"},
+ "Count of SDS reconfiguration error.", telemetry.Options{DefaultMetric: true})
+ tlmSDSReconfigSuccess = telemetry.NewCounterWithOpts("sds", "reconfiguration_success", []string{"pipeline", "type"},
+ "Count of SDS reconfiguration success.", telemetry.Options{DefaultMetric: true})
+)
+
+// Scanner wraps an SDS Scanner implementation, adds reconfiguration
+// capabilities and telemetry on top of it.
+ // Most Scanner methods are not thread safe for performance reasons; the caller
+ // has to ensure thread safety.
+type Scanner struct {
+ *sds.Scanner
+ // lock used to separate between the lifecycle of the scanner (Reconfigure, Delete)
+ // and the use of the scanner (Scan).
+ sync.Mutex
+ // standard rules as received through the remote configuration, indexed
+ // by the standard rule ID for O(1) access when receiving user configurations.
+ standardRules map[string]StandardRuleConfig
+ // standardDefaults contains some consts for using the standard rules definition.
+ standardDefaults StandardRulesDefaults
+ // rawConfig is the raw config previously received through RC.
+ rawConfig []byte
+ // configuredRules are stored on configuration to retrieve rules
+ // information on match. Use this read-only.
+ configuredRules []RuleConfig
+ // pipelineID is the logs pipeline ID for which we've created this scanner,
+ // stored as string as it is only used in the telemetry.
+ pipelineID string
+}
+
+// CreateScanner creates an SDS scanner.
+// Use `Reconfigure` to configure it manually.
+func CreateScanner(pipelineID int) *Scanner {
+ scanner := &Scanner{pipelineID: strconv.Itoa(pipelineID)}
+ log.Debugf("creating a new SDS scanner (internal id: %p)", scanner)
+ return scanner
+}
+
+// MatchActions as exposed by the RC configurations.
+const (
+ matchActionRCHash = "hash"
+ matchActionRCNone = "none"
+ matchActionRCPartialRedact = "partial_redact"
+ matchActionRCRedact = "redact"
+
+ RCPartialRedactFirstCharacters = "first"
+ RCPartialRedactLastCharacters = "last"
+
+ RCSecondaryValidationChineseIdChecksum = "chinese_id_checksum"
+ RCSecondaryValidationLuhnChecksum = "luhn_checksum"
+)
+
+// Reconfigure uses the given `ReconfigureOrder` to reconfigure in-memory
+// standard rules or user configuration.
+// The order contains both the kind of reconfiguration to do and the raw bytes
+// to apply the reconfiguration.
+ // When receiving standard rules, the user configuration is reloaded and scanners
+ // are recreated to use the newly received standard rules.
+ // This method is thread safe; a scan can't happen at the same time.
+func (s *Scanner) Reconfigure(order ReconfigureOrder) error {
+ if s == nil {
+ log.Warn("Trying to reconfigure a nil Scanner")
+ return nil
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ log.Debugf("Reconfiguring SDS scanner (internal id: %p)", s)
+
+ switch order.Type {
+ case StandardRules:
+ // reconfigure the standard rules
+ err := s.reconfigureStandardRules(order.Config)
+
+ // if we already received a configuration and no errors happened while
+ // reconfiguring the standard rules: reapply the user configuration now.
+ if err == nil && s.rawConfig != nil {
+ if rerr := s.reconfigureRules(s.rawConfig); rerr != nil {
+ log.Error("Can't reconfigure SDS after having received standard rules:", rerr)
+ s.rawConfig = nil // we drop this configuration because it is unusable
+ if err == nil {
+ err = rerr
+ }
+ }
+ }
+ return err
+ case AgentConfig:
+ return s.reconfigureRules(order.Config)
+ }
+
+ return fmt.Errorf("Scanner.Reconfigure: Unknown order type: %v", order.Type)
+}
+
+// reconfigureStandardRules stores in-memory standard rules received through RC.
+ // This does NOT reconfigure the internal SDS scanner; call `reconfigureRules`
+ // if you need to.
+ // This method is NOT thread safe; the caller has to ensure thread safety.
+func (s *Scanner) reconfigureStandardRules(rawConfig []byte) error {
+ if rawConfig == nil {
+ tlmSDSReconfigError.Inc(s.pipelineID, string(StandardRules), "nil_config")
+ return fmt.Errorf("Invalid nil raw configuration for standard rules")
+ }
+
+ var unmarshaled StandardRulesConfig
+ if err := json.Unmarshal(rawConfig, &unmarshaled); err != nil {
+ tlmSDSReconfigError.Inc(s.pipelineID, string(StandardRules), "cant_unmarshal")
+ return fmt.Errorf("Can't unmarshal raw configuration: %v", err)
+ }
+
+ // build a map for O(1) access when we'll receive configuration
+ standardRules := make(map[string]StandardRuleConfig)
+ for _, rule := range unmarshaled.Rules {
+ standardRules[rule.ID] = rule
+ }
+
+ s.standardRules = standardRules
+ s.standardDefaults = unmarshaled.Defaults
+
+ tlmSDSReconfigSuccess.Inc(s.pipelineID, string(StandardRules))
+ log.Info("Reconfigured", len(s.standardRules), "SDS standard rules.")
+ for _, rule := range s.standardRules {
+ log.Debug("Std rule:", rule.Name)
+ }
+
+ return nil
+}
+
+// reconfigureRules reconfigures the internal SDS scanner using the in-memory
+ // standard rules. It may delete and recreate the internal SDS scanner if
+ // necessary.
+ // This method is NOT thread safe; the caller has to ensure thread safety.
+func (s *Scanner) reconfigureRules(rawConfig []byte) error {
+ if rawConfig == nil {
+ tlmSDSReconfigError.Inc(s.pipelineID, string(AgentConfig), "nil_config")
+ return fmt.Errorf("Invalid nil raw configuration received for user configuration")
+ }
+
+ if s.standardRules == nil || len(s.standardRules) == 0 {
+ // store it for the next try
+ s.rawConfig = rawConfig
+ tlmSDSReconfigError.Inc(s.pipelineID, string(AgentConfig), "no_std_rules")
+ log.Info("Received a user configuration but no SDS standard rules available.")
+ return nil
+ }
+
+ var config RulesConfig
+ if err := json.Unmarshal(rawConfig, &config); err != nil {
+ tlmSDSReconfigError.Inc(s.pipelineID, string(AgentConfig), "cant_unmarshal")
+ return fmt.Errorf("Can't unmarshal raw configuration: %v", err)
+ }
+
+ // ignore disabled rules
+ totalRulesReceived := len(config.Rules)
+ config = config.OnlyEnabled()
+
+ log.Infof("Starting an SDS reconfiguration: %d rules received (in which %d are disabled)", totalRulesReceived, totalRulesReceived-len(config.Rules))
+
+ // if we received an empty array of rules, or all rules are disabled, interpret this as "stop SDS".
+ if len(config.Rules) == 0 {
+ log.Info("Received an empty configuration, stopping the SDS scanner.")
+ // destroy the old scanner
+ if s.Scanner != nil {
+ s.Scanner.Delete()
+ s.Scanner = nil
+ s.rawConfig = rawConfig
+ s.configuredRules = nil
+ tlmSDSReconfigSuccess.Inc(s.pipelineID, "shutdown")
+ }
+ return nil
+ }
+
+ // prepare the scanner rules
+ var sdsRules []sds.Rule
+ var malformedRulesCount int
+ var unknownStdRulesCount int
+ for _, userRule := range config.Rules {
+ // read the rule in the standard rules
+ standardRule, found := s.standardRules[userRule.Definition.StandardRuleID]
+ if !found {
+ log.Warnf("Referencing an unknown standard rule, id: %v", userRule.Definition.StandardRuleID)
+ unknownStdRulesCount += 1
+ continue
+ }
+
+ if rule, err := interpretRCRule(userRule, standardRule, s.standardDefaults); err != nil {
+ // log a warning that we can't interpret this rule, but keep going so
+ // the rest of the rules are still processed.
+ malformedRulesCount += 1
+ log.Warnf("%v", err.Error())
+ } else {
+ sdsRules = append(sdsRules, rule)
+ }
+ }
+
+ tlmSDSRulesState.Set(float64(malformedRulesCount), s.pipelineID, "malformed")
+ tlmSDSRulesState.Set(float64(unknownStdRulesCount), s.pipelineID, "unknown_std")
+
+ // create the new SDS Scanner
+ var scanner *sds.Scanner
+ var err error
+ if scanner, err = sds.CreateScanner(sdsRules); err != nil {
+ tlmSDSReconfigError.Inc(s.pipelineID, string(AgentConfig), "scanner_error")
+ return fmt.Errorf("while configuring an SDS Scanner: %v", err)
+ }
+
+ // destroy the old scanner
+ if s.Scanner != nil {
+ s.Scanner.Delete()
+ s.Scanner = nil
+ }
+
+ // store the raw configuration for a later refresh
+ // if we receive new standard rules
+ s.rawConfig = rawConfig
+ s.configuredRules = config.Rules
+
+ log.Info("Created an SDS scanner with", len(scanner.Rules), "enabled rules")
+ for _, rule := range s.configuredRules {
+ log.Debug("Configured rule:", rule.Name)
+ }
+ s.Scanner = scanner
+
+ tlmSDSRulesState.Set(float64(len(sdsRules)), s.pipelineID, "configured")
+ tlmSDSRulesState.Set(float64(totalRulesReceived-len(config.Rules)), s.pipelineID, "disabled")
+ tlmSDSReconfigSuccess.Inc(s.pipelineID, string(AgentConfig))
+
+ return nil
+}
+
+// interpretRCRule interprets a rule as received through RC to return
+// an sds.Rule usable with the shared library.
+// `standardRule` contains the definition, with the name, pattern, etc.
+// `userRule` contains the configuration done by the user: match action, etc.
+func interpretRCRule(userRule RuleConfig, standardRule StandardRuleConfig, defaults StandardRulesDefaults) (sds.Rule, error) {
+ var extraConfig sds.ExtraConfig
+
+ var defToUse = StandardRuleDefinition{Version: -1}
+
+ // go through all received definitions, use the most recent supported one.
+ // O(n) number of definitions in the rule.
+ for _, stdRuleDef := range standardRule.Definitions {
+ if defToUse.Version > stdRuleDef.Version {
+ continue
+ }
+
+ // The RC schema supports multiple required capabilities; for now the lib
+ // only supports one, so we'll just use the first one.
+ reqCapabilitiesCount := len(stdRuleDef.RequiredCapabilities)
+ if reqCapabilitiesCount > 0 {
+ if reqCapabilitiesCount > 1 {
+ // TODO(remy): telemetry
+ log.Warnf("Standard rule '%v' with multiple required capabilities: %d. Only the first one will be used", standardRule.Name, reqCapabilitiesCount)
+ }
+ received := stdRuleDef.RequiredCapabilities[0]
+ switch received {
+ case RCSecondaryValidationChineseIdChecksum:
+ extraConfig.SecondaryValidator = sds.ChineseIdChecksum
+ defToUse = stdRuleDef
+ case RCSecondaryValidationLuhnChecksum:
+ extraConfig.SecondaryValidator = sds.LuhnChecksum
+ defToUse = stdRuleDef
+ default:
+ // we don't know this required capability, test another version
+ log.Warnf("unknown required capability: %v", string(received))
+ continue
+ }
+ } else {
+ // no extra config to set
+ defToUse = stdRuleDef
+ }
+ }
+
+ if defToUse.Version == -1 {
+ // TODO(remy): telemetry
+ return sds.Rule{}, fmt.Errorf("unsupported rule with no compatible definition")
+ }
+
+ // we use the filled `CharacterCount` value to decide if we want
+ // to use the user provided configuration for proximity keywords
+ // or if we have to use the information provided in the std rules instead.
+ if userRule.IncludedKeywords.CharacterCount > 0 {
+ // proximity keywords configuration provided by the user
+ extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(userRule.IncludedKeywords.CharacterCount, userRule.IncludedKeywords.Keywords, nil)
+ } else if len(defToUse.DefaultIncludedKeywords) > 0 && defaults.IncludedKeywordsCharCount > 0 {
+ // the user has not specified proximity keywords
+ // use the proximity keywords provided by the standard rule if any
+ extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(defaults.IncludedKeywordsCharCount, defToUse.DefaultIncludedKeywords, nil)
+ }
+
+ // we've compiled all necessary information merging the standard rule and the user config
+ // create the rules for the scanner
+ matchAction := strings.ToLower(userRule.MatchAction.Type)
+ switch matchAction {
+ case matchActionRCNone:
+ return sds.NewMatchingRule(standardRule.Name, defToUse.Pattern, extraConfig), nil
+ case matchActionRCRedact:
+ return sds.NewRedactingRule(standardRule.Name, defToUse.Pattern, userRule.MatchAction.Placeholder, extraConfig), nil
+ case matchActionRCPartialRedact:
+ direction := sds.LastCharacters
+ switch userRule.MatchAction.Direction {
+ case string(RCPartialRedactLastCharacters):
+ direction = sds.LastCharacters
+ case string(RCPartialRedactFirstCharacters):
+ direction = sds.FirstCharacters
+ default:
+ log.Warnf("Unknown PartialRedact direction (%v), falling back on LastCharacters", userRule.MatchAction.Direction)
+ }
+ return sds.NewPartialRedactRule(standardRule.Name, defToUse.Pattern, userRule.MatchAction.CharacterCount, direction, extraConfig), nil
+ case matchActionRCHash:
+ return sds.NewHashRule(standardRule.Name, defToUse.Pattern, extraConfig), nil
+ }
+
+ return sds.Rule{}, fmt.Errorf("Unknown MatchAction type (%v) received through RC for rule '%s'", matchAction, standardRule.Name)
+}
+
+// Scan scans the given `event` using the internal SDS scanner.
+// Returns an error if the internal SDS scanner is not ready. If you need to
+// validate that the internal SDS scanner can be used, use `IsReady()`.
+ // Returns a boolean indicating whether the scan mutated the event; if so, the
+ // returned bytes should be used instead of the original event.
+ // This method is thread safe; a reconfiguration can't happen at the same time.
+func (s *Scanner) Scan(event []byte, msg *message.Message) (bool, []byte, error) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Scanner == nil {
+ return false, nil, fmt.Errorf("can't Scan with an uninitialized scanner")
+ }
+
+ // scanning
+ scanResult, err := s.Scanner.Scan(event)
+ if len(scanResult.Matches) > 0 {
+ for _, match := range scanResult.Matches {
+ if rc, err := s.GetRuleByIdx(match.RuleIdx); err != nil {
+ log.Warnf("can't apply rule tags: %v", err)
+ } else {
+ msg.ProcessingTags = append(msg.ProcessingTags, rc.Tags...)
+ }
+ }
+ }
+ // TODO(remy): in the future, we might want to do it differently than
+ // using a tag.
+ msg.ProcessingTags = append(msg.ProcessingTags, ScannedTag)
+
+ return scanResult.Mutated, scanResult.Event, err
+}
+
+// GetRuleByIdx returns the configured rule by its idx, referring to the idx
+// that the SDS scanner writes in its internal response.
+func (s *Scanner) GetRuleByIdx(idx uint32) (RuleConfig, error) {
+ if s.Scanner == nil {
+ return RuleConfig{}, fmt.Errorf("scanner not configured")
+ }
+ if uint32(len(s.configuredRules)) <= idx {
+ return RuleConfig{}, fmt.Errorf("scanner does not contain enough rules")
+ }
+ return s.configuredRules[idx], nil
+}
+
+// Delete deallocates the internal SDS scanner.
+ // This method is thread safe; a reconfiguration or a scan can't happen at the same time.
+func (s *Scanner) Delete() {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.Scanner != nil {
+ s.Scanner.Delete()
+ s.rawConfig = nil
+ s.configuredRules = nil
+ }
+ s.Scanner = nil
+}
+
+// IsReady returns true if this Scanner can be used
+ // to scan events and at least one rule would be applied.
+ // This method is NOT thread safe; the caller has to ensure thread safety.
+func (s *Scanner) IsReady() bool {
+ if s == nil {
+ return false
+ }
+ if s.Scanner == nil {
+ return false
+ }
+ if len(s.Scanner.Rules) == 0 {
+ return false
+ }
+
+ return true
+}
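
A sketch of the full lifecycle this file implements: standard rule definitions arrive first, then the user configuration that references them, and only then can events be scanned. It assumes the `sds` build tag (and the dd-sensitive-data-scanner native library) is available; the JSON payloads are invented, and the zero-value message.Message stands in for a real pipeline message:

//go:build sds

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/logs/message"
	"github.com/DataDog/datadog-agent/pkg/logs/sds"
)

func main() {
	scanner := sds.CreateScanner(0)
	defer scanner.Delete()

	// 1. Standard rules (definitions) must arrive before a user config can be applied.
	stdRules := []byte(`{"rules":[{"id":"std-email","name":"email",
	  "definitions":[{"version":1,"pattern":"[a-z]+@[a-z]+\\.com"}]}],
	  "defaults":{"included_keywords_char_count":10}}`)
	if err := scanner.Reconfigure(sds.ReconfigureOrder{Type: sds.StandardRules, Config: stdRules}); err != nil {
		panic(err)
	}

	// 2. User configuration enabling a redacting rule built on the standard definition.
	userCfg := []byte(`{"is_enabled":true,"rules":[{"id":"r1","name":"redact emails","is_enabled":true,
	  "definition":{"standard_rule_id":"std-email"},
	  "match_action":{"type":"redact","placeholder":"[EMAIL]"}}]}`)
	if err := scanner.Reconfigure(sds.ReconfigureOrder{Type: sds.AgentConfig, Config: userCfg}); err != nil {
		panic(err)
	}

	// 3. Scan an event; the zero-value message only collects processing tags here.
	msg := &message.Message{}
	mutated, redacted, err := scanner.Scan([]byte(`contact: john@example.com`), msg)
	fmt.Println(mutated, string(redacted), err)
}
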
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/scanner_nosds.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/scanner_nosds.go
new file mode 100644
index 0000000000..7ed96c1062
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sds/scanner_nosds.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !sds
+
+//nolint:revive
+package sds
+
+import (
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+const SDSEnabled = false
+
+// Scanner mock.
+type Scanner struct {
+}
+
+// Match mock.
+type Match struct {
+ RuleIdx uint32
+}
+
+// CreateScanner creates a scanner for unsupported platforms/architectures.
+func CreateScanner(_ int) *Scanner {
+ return nil
+}
+
+// Reconfigure mocks the Reconfigure function.
+func (s *Scanner) Reconfigure(_ ReconfigureOrder) error {
+ return nil
+}
+
+// Delete mocks the Delete function.
+func (s *Scanner) Delete() {}
+
+// GetRuleByIdx mocks the GetRuleByIdx function.
+func (s *Scanner) GetRuleByIdx(_ uint32) (RuleConfig, error) {
+ return RuleConfig{}, nil
+}
+
+// IsReady mocks the IsReady function.
+func (s *Scanner) IsReady() bool { return false }
+
+// Scan mocks the Scan function.
+func (s *Scanner) Scan(_ []byte, _ *message.Message) (bool, []byte, error) {
+ return false, nil, nil
+}
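
This stub and scanner.go above compile under mutually exclusive build tags, so callers can be written once and simply go through no-ops when the scanner is compiled out. A small sketch of gating on the exported SDSEnabled constant:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/logs/sds"
)

func main() {
	scanner := sds.CreateScanner(0)
	// With the `sds` tag this is a functional scanner; without it, CreateScanner
	// returns nil and every method is a harmless no-op, so callers need no build tags of their own.
	if sds.SDSEnabled && scanner.IsReady() {
		fmt.Println("SDS scanning active")
	} else {
		fmt.Println("SDS disabled or not configured yet")
	}
}
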
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/batch_strategy.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/batch_strategy.go
new file mode 100644
index 0000000000..310bdffd16
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/batch_strategy.go
@@ -0,0 +1,164 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package sender
+
+import (
+ "time"
+
+ "github.com/benbjohnson/clock"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/telemetry"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+var (
+ tlmDroppedTooLarge = telemetry.NewCounter("logs_sender_batch_strategy", "dropped_too_large", []string{"pipeline"}, "Number of payloads dropped due to being too large")
+)
+
+// batchStrategy contains all the logic to send logs in batch.
+type batchStrategy struct {
+ inputChan chan *message.Message
+ outputChan chan *message.Payload
+ flushChan chan struct{}
+ buffer *MessageBuffer
+ // pipelineName provides a name for the strategy to differentiate it from other instances in other internal pipelines
+ pipelineName string
+ serializer Serializer
+ batchWait time.Duration
+ contentEncoding ContentEncoding
+ stopChan chan struct{} // closed when the goroutine has finished
+ clock clock.Clock
+}
+
+ // NewBatchStrategy returns a new concurrent batch strategy with the specified batch and content size limits
+func NewBatchStrategy(inputChan chan *message.Message,
+ outputChan chan *message.Payload,
+ flushChan chan struct{},
+ serializer Serializer,
+ batchWait time.Duration,
+ maxBatchSize int,
+ maxContentSize int,
+ pipelineName string,
+ contentEncoding ContentEncoding) Strategy {
+ return newBatchStrategyWithClock(inputChan, outputChan, flushChan, serializer, batchWait, maxBatchSize, maxContentSize, pipelineName, clock.New(), contentEncoding)
+}
+
+func newBatchStrategyWithClock(inputChan chan *message.Message,
+ outputChan chan *message.Payload,
+ flushChan chan struct{},
+ serializer Serializer,
+ batchWait time.Duration,
+ maxBatchSize int,
+ maxContentSize int,
+ pipelineName string,
+ clock clock.Clock,
+ contentEncoding ContentEncoding) Strategy {
+
+ return &batchStrategy{
+ inputChan: inputChan,
+ outputChan: outputChan,
+ flushChan: flushChan,
+ buffer: NewMessageBuffer(maxBatchSize, maxContentSize),
+ serializer: serializer,
+ batchWait: batchWait,
+ contentEncoding: contentEncoding,
+ stopChan: make(chan struct{}),
+ pipelineName: pipelineName,
+ clock: clock,
+ }
+}
+
+// Stop flushes the buffer and stops the strategy
+func (s *batchStrategy) Stop() {
+ close(s.inputChan)
+ <-s.stopChan
+}
+
+// Start reads the incoming messages and accumulates them to a buffer. The buffer is
+// encoded (optionally compressed) and written to a Payload which goes to the next
+// step in the pipeline.
+func (s *batchStrategy) Start() {
+
+ go func() {
+ flushTicker := s.clock.Ticker(s.batchWait)
+ defer func() {
+ s.flushBuffer(s.outputChan)
+ flushTicker.Stop()
+ close(s.stopChan)
+ }()
+ for {
+ select {
+ case m, isOpen := <-s.inputChan:
+
+ if !isOpen {
+ // inputChan has been closed, no more payloads are expected
+ return
+ }
+ s.processMessage(m, s.outputChan)
+ case <-flushTicker.C:
+ // flush the payloads at a regular interval so pending messages don't wait here for too long.
+ s.flushBuffer(s.outputChan)
+ case <-s.flushChan:
+ // flush payloads on demand, used for infrequently running serverless functions
+ s.flushBuffer(s.outputChan)
+ }
+ }
+ }()
+}
+
+func (s *batchStrategy) processMessage(m *message.Message, outputChan chan *message.Payload) {
+ if m.Origin != nil {
+ m.Origin.LogSource.LatencyStats.Add(m.GetLatency())
+ }
+ added := s.buffer.AddMessage(m)
+ if !added || s.buffer.IsFull() {
+ s.flushBuffer(outputChan)
+ }
+ if !added {
+ // the message may not have been added because the buffer was full,
+ // so retry once now that the buffer has been flushed
+ if !s.buffer.AddMessage(m) {
+ log.Warnf("Dropped message in pipeline=%s reason=too-large ContentLength=%d ContentSizeLimit=%d", s.pipelineName, len(m.GetContent()), s.buffer.ContentSizeLimit())
+ tlmDroppedTooLarge.Inc(s.pipelineName)
+ }
+ }
+}
+
+// flushBuffer sends all the messages that are stored in the buffer and forwards them
+// to the next stage of the pipeline.
+func (s *batchStrategy) flushBuffer(outputChan chan *message.Payload) {
+ if s.buffer.IsEmpty() {
+ return
+ }
+ messages := s.buffer.GetMessages()
+ s.buffer.Clear()
+ // Logging specifically for DBM pipelines, which seem to fail to send more often than other pipelines.
+ // pipelineName comes from epforwarder.passthroughPipelineDescs.eventType, and these names are constants in the epforwarder package.
+ if s.pipelineName == "dbm-samples" || s.pipelineName == "dbm-metrics" || s.pipelineName == "dbm-activity" {
+ log.Debugf("Flushing buffer and sending %d messages for pipeline %s", len(messages), s.pipelineName)
+ }
+ s.sendMessages(messages, outputChan)
+}
+
+func (s *batchStrategy) sendMessages(messages []*message.Message, outputChan chan *message.Payload) {
+ serializedMessage := s.serializer.Serialize(messages)
+ log.Debugf("Send messages for pipeline %s (msg_count:%d, content_size=%d, avg_msg_size=%.2f)", s.pipelineName, len(messages), len(serializedMessage), float64(len(serializedMessage))/float64(len(messages)))
+
+ encodedPayload, err := s.contentEncoding.encode(serializedMessage)
+ if err != nil {
+ log.Warn("Encoding failed - dropping payload", err)
+ return
+ }
+
+ outputChan <- &message.Payload{
+ Messages: messages,
+ Encoded: encodedPayload,
+ Encoding: s.contentEncoding.name(),
+ UnencodedSize: len(serializedMessage),
+ }
+}
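
A wiring sketch for the batch strategy using only exported pieces from this package (LineSerializer and IdentityContentType are defined in the files that follow). The channel sizes, limits, and pipeline name are illustrative, and it assumes the Strategy interface returned here exposes the Start and Stop methods implemented above (the interface itself lives in strategy.go, outside this hunk):

package main

import (
	"fmt"
	"time"

	"github.com/DataDog/datadog-agent/pkg/logs/message"
	"github.com/DataDog/datadog-agent/pkg/logs/sender"
)

func main() {
	input := make(chan *message.Message, 100)
	output := make(chan *message.Payload, 10)
	flush := make(chan struct{})

	strategy := sender.NewBatchStrategy(
		input, output, flush,
		sender.LineSerializer, // newline-separated content
		5*time.Second,         // batchWait: flush at least this often
		200,                   // maxBatchSize: messages per payload
		1_000_000,             // maxContentSize: bytes per payload
		"demo-pipeline",
		sender.IdentityContentType, // no compression
	)
	strategy.Start()

	// A consumer must drain `output`; in the agent this is the next pipeline stage.
	go func() {
		for p := range output {
			fmt.Println("payload with", len(p.Messages), "messages,", p.UnencodedSize, "bytes")
		}
	}()

	// Producers push *message.Message values onto `input`; Stop flushes whatever is buffered.
	strategy.Stop()
	close(output)
}
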
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/content_encoding.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/content_encoding.go
new file mode 100644
index 0000000000..75f59587ff
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/content_encoding.go
@@ -0,0 +1,73 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package sender
+
+import (
+ "bytes"
+ "compress/gzip"
+)
+
+// ContentEncoding encodes the payload
+type ContentEncoding interface {
+ name() string
+ encode(payload []byte) ([]byte, error)
+}
+
+// IdentityContentType encodes the payload using the identity function
+var IdentityContentType ContentEncoding = &identityContentType{}
+
+type identityContentType struct{}
+
+func (c *identityContentType) name() string {
+ return "identity"
+}
+
+func (c *identityContentType) encode(payload []byte) ([]byte, error) {
+ return payload, nil
+}
+
+// GzipContentEncoding encodes the payload using gzip algorithm
+type GzipContentEncoding struct {
+ level int
+}
+
+// NewGzipContentEncoding creates a new Gzip content type
+func NewGzipContentEncoding(level int) *GzipContentEncoding {
+ if level < gzip.NoCompression {
+ level = gzip.NoCompression
+ } else if level > gzip.BestCompression {
+ level = gzip.BestCompression
+ }
+
+ return &GzipContentEncoding{
+ level,
+ }
+}
+
+func (c *GzipContentEncoding) name() string {
+ return "gzip"
+}
+
+func (c *GzipContentEncoding) encode(payload []byte) ([]byte, error) {
+ var compressedPayload bytes.Buffer
+ gzipWriter, err := gzip.NewWriterLevel(&compressedPayload, c.level)
+ if err != nil {
+ return nil, err
+ }
+ _, err = gzipWriter.Write(payload)
+ if err != nil {
+ return nil, err
+ }
+ err = gzipWriter.Flush()
+ if err != nil {
+ return nil, err
+ }
+ err = gzipWriter.Close()
+ if err != nil {
+ return nil, err
+ }
+ return compressedPayload.Bytes(), nil
+}
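
Since encode and name are unexported, a quick way to exercise the gzip encoder in isolation is from inside package sender, for example as a test. A rough round-trip sketch; the payload is arbitrary:

package sender

import (
	"bytes"
	"compress/gzip"
	"io"
	"testing"
)

// A round-trip check for the gzip encoder; levels outside the valid
// range are clamped by NewGzipContentEncoding.
func TestGzipContentEncodingRoundTrip(t *testing.T) {
	payload := []byte(`{"message":"hello"}`)

	encoded, err := NewGzipContentEncoding(gzip.BestSpeed).encode(payload)
	if err != nil {
		t.Fatal(err)
	}

	reader, err := gzip.NewReader(bytes.NewReader(encoded))
	if err != nil {
		t.Fatal(err)
	}
	decoded, err := io.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(decoded, payload) {
		t.Fatalf("got %q, want %q", decoded, payload)
	}
}
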
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/destination_sender.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/destination_sender.go
new file mode 100644
index 0000000000..381dada567
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/destination_sender.go
@@ -0,0 +1,141 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022-present Datadog, Inc.
+
+package sender
+
+import (
+ "sync"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+ // DestinationSender wraps a destination so that sending blocks when the buffer is full,
+ // but does not block while the destination is retrying
+type DestinationSender struct {
+ config pkgconfigmodel.Reader
+ sendEnabled bool
+ input chan *message.Payload
+ destination client.Destination
+ retryReader chan bool
+ stopChan <-chan struct{}
+ retryLock sync.Mutex
+ lastRetryState bool
+ cancelSendChan chan struct{}
+ lastSendSucceeded bool
+}
+
+// NewDestinationSender creates a new DestinationSender
+func NewDestinationSender(config pkgconfigmodel.Reader, destination client.Destination, output chan *message.Payload, bufferSize int) *DestinationSender {
+ inputChan := make(chan *message.Payload, bufferSize)
+ retryReader := make(chan bool, 1)
+ stopChan := destination.Start(inputChan, output, retryReader)
+
+ d := &DestinationSender{
+ config: config,
+ sendEnabled: true,
+ input: inputChan,
+ destination: destination,
+ retryReader: retryReader,
+ stopChan: stopChan,
+ retryLock: sync.Mutex{},
+ lastRetryState: false,
+ cancelSendChan: nil,
+ lastSendSucceeded: false,
+ }
+ d.startRetryReader()
+
+ return d
+}
+
+func (d *DestinationSender) startRetryReader() {
+ go func() {
+ for v := range d.retryReader {
+ d.retryLock.Lock()
+ if d.cancelSendChan != nil && !d.lastRetryState {
+ select {
+ case d.cancelSendChan <- struct{}{}:
+ default:
+ }
+ }
+ d.lastRetryState = v
+ d.retryLock.Unlock()
+ }
+ }()
+}
+
+// Stop stops the DestinationSender
+func (d *DestinationSender) Stop() {
+ close(d.input)
+ <-d.stopChan
+ close(d.retryReader)
+}
+
+func (d *DestinationSender) canSend() bool {
+ if d.destination.IsMRF() {
+ if !d.sendEnabled {
+ if d.config.GetBool("multi_region_failover.enabled") && d.config.GetBool("multi_region_failover.failover_logs") {
+ d.sendEnabled = true
+ log.Infof("Forwarder for domain %v has been failed over to, enabling it for MRF.", d.destination.Target())
+ } else {
+ log.Debugf("Forwarder for domain %v is disabled; dropping transaction for this domain.", d.destination.Target())
+ }
+ } else {
+ if !d.config.GetBool("multi_region_failover.enabled") || !d.config.GetBool("multi_region_failover.failover_logs") {
+ d.sendEnabled = false
+ log.Infof("Forwarder for domain %v was disabled; transactions will be dropped for this domain.", d.destination.Target())
+ }
+ }
+ }
+
+ return d.sendEnabled
+}
+
+// Send sends a payload and blocks if the input is full. It will not block if the destination
+// is retrying payloads and will cancel the blocking attempt if the retry state changes
+func (d *DestinationSender) Send(payload *message.Payload) bool {
+ d.lastSendSucceeded = false
+ d.retryLock.Lock()
+ d.cancelSendChan = make(chan struct{}, 1)
+ isRetrying := d.lastRetryState
+ d.retryLock.Unlock()
+
+ defer func() {
+ d.retryLock.Lock()
+ close(d.cancelSendChan)
+ d.cancelSendChan = nil
+ d.retryLock.Unlock()
+ }()
+
+ if !isRetrying {
+ // if we can't send, we consider the send call as successful because we don't want to block the
+ // pipeline when HA failover is knowingly disabled
+ if !d.canSend() {
+ d.lastSendSucceeded = true
+ return true
+ }
+
+ select {
+ case d.input <- payload:
+ d.lastSendSucceeded = true
+ return true
+ case <-d.cancelSendChan:
+ }
+ }
+ return false
+}
+
+// NonBlockingSend tries to send the payload and fails silently if the input is full.
+ // Returns false if the buffer is full; true if successful.
+func (d *DestinationSender) NonBlockingSend(payload *message.Payload) bool {
+ select {
+ case d.input <- payload:
+ return true
+ default:
+ }
+ return false
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/message_buffer.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/message_buffer.go
new file mode 100644
index 0000000000..b14551a156
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/message_buffer.go
@@ -0,0 +1,64 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package sender
+
+import (
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+// MessageBuffer accumulates messages to a buffer until the max capacity is reached.
+type MessageBuffer struct {
+ messageBuffer []*message.Message
+ contentSize int
+ contentSizeLimit int
+}
+
+// NewMessageBuffer returns a new MessageBuffer.
+func NewMessageBuffer(batchSizeLimit int, contentSizeLimit int) *MessageBuffer {
+ return &MessageBuffer{
+ messageBuffer: make([]*message.Message, 0, batchSizeLimit),
+ contentSizeLimit: contentSizeLimit,
+ }
+}
+
+ // AddMessage adds a message to the buffer if there is still some free space;
+ // returns true if the message was added.
+func (p *MessageBuffer) AddMessage(message *message.Message) bool {
+ contentSize := len(message.GetContent())
+ if len(p.messageBuffer) < cap(p.messageBuffer) && p.contentSize+contentSize <= p.contentSizeLimit {
+ p.messageBuffer = append(p.messageBuffer, message)
+ p.contentSize += contentSize
+ return true
+ }
+ return false
+}
+
+// Clear reinitializes the buffer.
+func (p *MessageBuffer) Clear() {
+ // create a new buffer to avoid race conditions
+ p.messageBuffer = make([]*message.Message, 0, cap(p.messageBuffer))
+ p.contentSize = 0
+}
+
+// GetMessages returns the messages stored in the buffer.
+func (p *MessageBuffer) GetMessages() []*message.Message {
+ return p.messageBuffer
+}
+
+// IsFull returns true if the buffer is full.
+func (p *MessageBuffer) IsFull() bool {
+ return len(p.messageBuffer) == cap(p.messageBuffer) || p.contentSize == p.contentSizeLimit
+}
+
+// IsEmpty returns true if the buffer is empty.
+func (p *MessageBuffer) IsEmpty() bool {
+ return len(p.messageBuffer) == 0
+}
+
+// ContentSizeLimit returns the configured content size limit. Messages above this limit are not accepted.
+func (p *MessageBuffer) ContentSizeLimit() int {
+ return p.contentSizeLimit
+}
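
A sketch of the add-then-flush pattern that batchStrategy builds on, isolated to the buffer itself. The zero-value message.Message is a placeholder (assumed to report empty content), so only the message-count limit is actually exercised here:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/logs/message"
	"github.com/DataDog/datadog-agent/pkg/logs/sender"
)

func main() {
	// At most 3 messages or 1 KiB of content per batch.
	buf := sender.NewMessageBuffer(3, 1024)

	flush := func() {
		batch := buf.GetMessages()
		fmt.Println("flushing", len(batch), "messages")
		buf.Clear()
	}

	for i := 0; i < 5; i++ {
		msg := &message.Message{} // placeholder for a real log message
		if !buf.AddMessage(msg) {
			// buffer full (or the message alone exceeds ContentSizeLimit): flush and retry once
			flush()
			if !buf.AddMessage(msg) {
				fmt.Println("dropping message larger than", buf.ContentSizeLimit(), "bytes")
			}
		}
		if buf.IsFull() {
			flush()
		}
	}
	flush()
}
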
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/sender.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/sender.go
new file mode 100644
index 0000000000..384ec383d0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/sender.go
@@ -0,0 +1,141 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package sender
+
+import (
+ "strconv"
+ "time"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/logs/client"
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/telemetry"
+)
+
+var (
+ tlmPayloadsDropped = telemetry.NewCounterWithOpts("logs_sender", "payloads_dropped", []string{"reliable", "destination"}, "Payloads dropped", telemetry.Options{DefaultMetric: true})
+ tlmMessagesDropped = telemetry.NewCounterWithOpts("logs_sender", "messages_dropped", []string{"reliable", "destination"}, "Messages dropped", telemetry.Options{DefaultMetric: true})
+ tlmSendWaitTime = telemetry.NewCounter("logs_sender", "send_wait", []string{}, "Time spent waiting for all sends to finish")
+)
+
+// Sender sends logs to different destinations. Destinations can be either
+// reliable or unreliable. The sender ensures that logs are sent to at least
+// one reliable destination and will block the pipeline if they are in an
+// error state. Unreliable destinations will only send logs when at least
+// one reliable destination is also sending logs. However they do not update
+// the auditor or block the pipeline if they fail. There will always be at
+// least 1 reliable destination (the main destination).
+type Sender struct {
+ config pkgconfigmodel.Reader
+ inputChan chan *message.Payload
+ outputChan chan *message.Payload
+ destinations *client.Destinations
+ done chan struct{}
+ bufferSize int
+}
+
+// NewSender returns a new sender.
+func NewSender(config pkgconfigmodel.Reader, inputChan chan *message.Payload, outputChan chan *message.Payload, destinations *client.Destinations, bufferSize int) *Sender {
+ return &Sender{
+ config: config,
+ inputChan: inputChan,
+ outputChan: outputChan,
+ destinations: destinations,
+ done: make(chan struct{}),
+ bufferSize: bufferSize,
+ }
+}
+
+// Start starts the sender.
+func (s *Sender) Start() {
+ go s.run()
+}
+
+// Stop stops the sender,
+// this call blocks until inputChan is flushed
+func (s *Sender) Stop() {
+ close(s.inputChan)
+ <-s.done
+}
+
+func (s *Sender) run() {
+ reliableDestinations := buildDestinationSenders(s.config, s.destinations.Reliable, s.outputChan, s.bufferSize)
+
+ sink := additionalDestinationsSink(s.bufferSize)
+ unreliableDestinations := buildDestinationSenders(s.config, s.destinations.Unreliable, sink, s.bufferSize)
+
+ for payload := range s.inputChan {
+ var startInUse = time.Now()
+
+ sent := false
+ for !sent {
+ for _, destSender := range reliableDestinations {
+ if destSender.Send(payload) {
+ sent = true
+ }
+ }
+
+ if !sent {
+ // Throttle the poll loop while waiting for a send to succeed.
+ // This will only happen when all reliable destinations
+ // are blocked, so logs have nowhere to go.
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+
+ for i, destSender := range reliableDestinations {
+ // If an endpoint is stuck in the previous step, try to buffer the payloads if we have room to mitigate
+ // loss on intermittent failures.
+ if !destSender.lastSendSucceeded {
+ if !destSender.NonBlockingSend(payload) {
+ tlmPayloadsDropped.Inc("true", strconv.Itoa(i))
+ tlmMessagesDropped.Add(float64(len(payload.Messages)), "true", strconv.Itoa(i))
+ }
+ }
+ }
+
+ // Attempt to send to unreliable destinations
+ for i, destSender := range unreliableDestinations {
+ if !destSender.NonBlockingSend(payload) {
+ tlmPayloadsDropped.Inc("false", strconv.Itoa(i))
+ tlmMessagesDropped.Add(float64(len(payload.Messages)), "false", strconv.Itoa(i))
+ }
+ }
+
+ inUse := float64(time.Since(startInUse) / time.Millisecond)
+ tlmSendWaitTime.Add(inUse)
+ }
+
+ // Clean up the destinations
+ for _, destSender := range reliableDestinations {
+ destSender.Stop()
+ }
+ for _, destSender := range unreliableDestinations {
+ destSender.Stop()
+ }
+ close(sink)
+ s.done <- struct{}{}
+}
+
+// additionalDestinationsSink drains the output channel for destinations that don't update the auditor.
+func additionalDestinationsSink(bufferSize int) chan *message.Payload {
+ sink := make(chan *message.Payload, bufferSize)
+ go func() {
+ // drain channel, stop when channel is closed
+ //nolint:revive // TODO(AML) Fix revive linter
+ for range sink {
+ }
+ }()
+ return sink
+}
+
+func buildDestinationSenders(config pkgconfigmodel.Reader, destinations []client.Destination, output chan *message.Payload, bufferSize int) []*DestinationSender {
+ destinationSenders := []*DestinationSender{}
+ for _, destination := range destinations {
+ destinationSenders = append(destinationSenders, NewDestinationSender(config, destination, output, bufferSize))
+ }
+ return destinationSenders
+}
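// A minimal, stdlib-only sketch of the routing policy implemented by Sender.run
// above: block until at least one "reliable" destination accepts a payload, then
// retry on a timer while all of them are stuck. The reliableDest type and its
// channel-backed send are hypothetical stand-ins for illustration; they are not
// the datadog-agent client API.
package main

import (
	"fmt"
	"time"
)

type reliableDest struct {
	name string
	ch   chan string // simulates a bounded send buffer
}

// trySend attempts a non-blocking send, mirroring DestinationSender.NonBlockingSend.
func (d *reliableDest) trySend(p string) bool {
	select {
	case d.ch <- p:
		return true
	default:
		return false
	}
}

func main() {
	dests := []*reliableDest{
		{name: "main", ch: make(chan string, 1)},
		{name: "backup", ch: make(chan string, 1)},
	}

	payload := "payload-1"
	sent := false
	for !sent {
		for _, d := range dests {
			if d.trySend(payload) {
				sent = true
			}
		}
		if !sent {
			// Throttle the retry loop, as the real sender does, while every
			// reliable destination is blocked.
			time.Sleep(100 * time.Millisecond)
		}
	}
	fmt.Println("payload accepted by at least one reliable destination")
}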
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/serializer.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/serializer.go
new file mode 100644
index 0000000000..54389ca5a7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/serializer.go
@@ -0,0 +1,67 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package sender
+
+import (
+ "bytes"
+
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+)
+
+var (
+ // LineSerializer is a shared line serializer.
+ LineSerializer Serializer = &lineSerializer{}
+ // ArraySerializer is a shared array serializer.
+ ArraySerializer Serializer = &arraySerializer{}
+)
+
+// Serializer transforms a batch of messages into a payload.
+// It is the one rendering the messages (i.e. either directly using
+// raw []byte data from unstructured messages or turning structured
+// messages into []byte data).
+type Serializer interface {
+ Serialize(messages []*message.Message) []byte
+}
+
+// lineSerializer transforms a message array into a payload
+// separating content by new line character.
+type lineSerializer struct{}
+
+// Serialize concatenates all messages using
+// a newline character as a separator,
+// for example:
+// "{"message":"content1"}", "{"message":"content2"}"
+// returns "{"message":"content1"}\n{"message":"content2"}"
+func (s *lineSerializer) Serialize(messages []*message.Message) []byte {
+ var buffer bytes.Buffer
+ for i, message := range messages {
+ if i > 0 {
+ buffer.WriteByte('\n')
+ }
+ buffer.Write(message.GetContent())
+ }
+ return buffer.Bytes()
+}
+
+// arraySerializer transforms a message array into an array string payload.
+type arraySerializer struct{}
+
+// Serialize transforms all messages into an array string,
+// for example:
+// "{"message":"content1"}", "{"message":"content2"}"
+// returns "[{"message":"content1"},{"message":"content2"}]"
+func (s *arraySerializer) Serialize(messages []*message.Message) []byte {
+ var buffer bytes.Buffer
+ buffer.WriteByte('[')
+ for i, message := range messages {
+ if i > 0 {
+ buffer.WriteByte(',')
+ }
+ buffer.Write(message.GetContent())
+ }
+ buffer.WriteByte(']')
+ return buffer.Bytes()
+}
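// A stdlib-only sketch of the two output shapes documented above. The real
// serializers operate on *message.Message values; plain byte slices are used
// here purely to illustrate the documented formats.
package main

import (
	"bytes"
	"fmt"
)

// joinLines mirrors lineSerializer: contents separated by a newline.
func joinLines(contents [][]byte) []byte {
	var buf bytes.Buffer
	for i, c := range contents {
		if i > 0 {
			buf.WriteByte('\n')
		}
		buf.Write(c)
	}
	return buf.Bytes()
}

// joinArray mirrors arraySerializer: contents wrapped in brackets and separated by commas.
func joinArray(contents [][]byte) []byte {
	var buf bytes.Buffer
	buf.WriteByte('[')
	for i, c := range contents {
		if i > 0 {
			buf.WriteByte(',')
		}
		buf.Write(c)
	}
	buf.WriteByte(']')
	return buf.Bytes()
}

func main() {
	msgs := [][]byte{[]byte(`{"message":"content1"}`), []byte(`{"message":"content2"}`)}
	fmt.Printf("line:  %s\n", joinLines(msgs))  // {"message":"content1"}\n{"message":"content2"}
	fmt.Printf("array: %s\n", joinArray(msgs)) // [{"message":"content1"},{"message":"content2"}]
}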
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/strategy.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/strategy.go
new file mode 100644
index 0000000000..91222d8950
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/strategy.go
@@ -0,0 +1,16 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package sender
+
+// Strategy should contain all logic to send logs to a remote destination
+// and forward them to the next stage of the pipeline. In the logs pipeline,
+// the strategy implementation should convert a stream of incoming Messages
+// to a stream of Payloads that the sender can handle. A strategy is startable
+// and stoppable so that the pipeline can manage its lifecycle.
+type Strategy interface {
+ Start()
+ Stop()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/stream_strategy.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/stream_strategy.go
new file mode 100644
index 0000000000..e31d455e5e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sender/stream_strategy.go
@@ -0,0 +1,62 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package sender
+
+import (
+ "github.com/DataDog/datadog-agent/pkg/logs/message"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// streamStrategy is a Strategy that creates one Payload for each Message, containing
+// that Message's Content. This is used for TCP destinations, which stream the output
+// without batching multiple messages together.
+type streamStrategy struct {
+ inputChan chan *message.Message
+ outputChan chan *message.Payload
+ contentEncoding ContentEncoding
+ done chan struct{}
+}
+
+// NewStreamStrategy creates a new stream strategy
+func NewStreamStrategy(inputChan chan *message.Message, outputChan chan *message.Payload, contentEncoding ContentEncoding) Strategy {
+ return &streamStrategy{
+ inputChan: inputChan,
+ outputChan: outputChan,
+ contentEncoding: contentEncoding,
+ done: make(chan struct{}),
+ }
+}
+
+// Start reads incoming messages one at a time and forwards each as a payload to the next stage of the pipeline.
+func (s *streamStrategy) Start() {
+ go func() {
+ for msg := range s.inputChan {
+ if msg.Origin != nil {
+ msg.Origin.LogSource.LatencyStats.Add(msg.GetLatency())
+ }
+
+ encodedPayload, err := s.contentEncoding.encode(msg.GetContent())
+ if err != nil {
+ log.Warn("Encoding failed - dropping payload", err)
+ continue
+ }
+
+ s.outputChan <- &message.Payload{
+ Messages: []*message.Message{msg},
+ Encoded: encodedPayload,
+ Encoding: s.contentEncoding.name(),
+ UnencodedSize: len(msg.GetContent()),
+ }
+ }
+ s.done <- struct{}{}
+ }()
+}
+
+// Stop stops the strategy
+func (s *streamStrategy) Stop() {
+ close(s.inputChan)
+ <-s.done
+}
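// A simplified, self-contained sketch of the streamStrategy lifecycle above: one
// output payload per input message, with stop closing the input channel and
// waiting for the worker goroutine to drain it. The payload struct and string
// messages are illustrative stand-ins, not the datadog-agent message types.
package main

import "fmt"

type payload struct {
	content       string
	unencodedSize int
}

type stream struct {
	in   chan string
	out  chan payload
	done chan struct{}
}

func (s *stream) start() {
	go func() {
		for msg := range s.in {
			// Each message becomes exactly one payload, as in streamStrategy.
			s.out <- payload{content: msg, unencodedSize: len(msg)}
		}
		s.done <- struct{}{}
	}()
}

func (s *stream) stop() {
	close(s.in)
	<-s.done
}

func main() {
	s := &stream{in: make(chan string), out: make(chan payload, 2), done: make(chan struct{})}
	s.start()
	s.in <- "hello"
	s.in <- "world"
	s.stop()
	close(s.out)
	for p := range s.out {
		fmt.Printf("payload %q (%d bytes)\n", p.content, p.unencodedSize)
	}
}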
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/replaceable_source.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/replaceable_source.go
new file mode 100644
index 0000000000..4caa6a52f3
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/replaceable_source.go
@@ -0,0 +1,99 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package sources
+
+import (
+ "sync"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ status "github.com/DataDog/datadog-agent/pkg/logs/status/utils"
+)
+
+// ReplaceableSource is a thread-safe wrapper for a LogSource that allows it to be replaced with a new one.
+// There are some uncommon circumstances where a source needs to be replaced on an active tailer. This wrapper
+// helps ensure there is no unsafe access to the many underlying properties of a LogSource.
+type ReplaceableSource struct {
+ sync.RWMutex
+ source *LogSource
+}
+
+// NewReplaceableSource returns a new ReplaceableSource
+func NewReplaceableSource(source *LogSource) *ReplaceableSource {
+ return &ReplaceableSource{
+ source: source,
+ }
+}
+
+// Replace replaces the source with a new one
+func (r *ReplaceableSource) Replace(source *LogSource) {
+ r.Lock()
+ defer r.Unlock()
+ r.source = source
+}
+
+// Status gets the underlying status
+func (r *ReplaceableSource) Status() *status.LogStatus {
+ r.RLock()
+ defer r.RUnlock()
+ return r.source.Status
+}
+
+// Config gets the underlying config
+func (r *ReplaceableSource) Config() *config.LogsConfig {
+ r.RLock()
+ defer r.RUnlock()
+ return r.source.Config
+}
+
+// AddInput registers an input as being handled by this source.
+func (r *ReplaceableSource) AddInput(input string) {
+ r.RLock()
+ defer r.RUnlock()
+ r.source.AddInput(input)
+}
+
+// RemoveInput removes an input from this source.
+func (r *ReplaceableSource) RemoveInput(input string) {
+ r.RLock()
+ defer r.RUnlock()
+ r.source.RemoveInput(input)
+}
+
+// RecordBytes reports bytes to the source expvars
+func (r *ReplaceableSource) RecordBytes(n int64) {
+ r.RLock()
+ defer r.RUnlock()
+ r.source.RecordBytes(n)
+}
+
+// GetSourceType gets the source type
+func (r *ReplaceableSource) GetSourceType() SourceType {
+ r.RLock()
+ defer r.RUnlock()
+ return r.source.sourceType
+}
+
+// UnderlyingSource gets the underlying log source
+func (r *ReplaceableSource) UnderlyingSource() *LogSource {
+ r.RLock()
+ defer r.RUnlock()
+ return r.source
+}
+
+// RegisterInfo registers some info to display on the status page
+func (r *ReplaceableSource) RegisterInfo(i status.InfoProvider) {
+ r.RLock()
+ defer r.RUnlock()
+ r.source.RegisterInfo(i)
+}
+
+// GetInfo gets an InfoProvider instance by the key
+func (r *ReplaceableSource) GetInfo(key string) status.InfoProvider {
+ r.RLock()
+ defer r.RUnlock()
+ return r.source.GetInfo(key)
+}
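// A minimal illustration of the pattern used by ReplaceableSource above: an
// RWMutex-guarded pointer that readers dereference under RLock and that a
// writer swaps under Lock. The settings type is a hypothetical placeholder,
// not a datadog-agent type.
package main

import (
	"fmt"
	"sync"
)

type settings struct{ name string }

type replaceable struct {
	mu  sync.RWMutex
	cur *settings
}

// replace swaps the wrapped value, mirroring ReplaceableSource.Replace.
func (r *replaceable) replace(s *settings) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.cur = s
}

// name reads through the wrapper under a read lock, mirroring the getters above.
func (r *replaceable) name() string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.cur.name
}

func main() {
	r := &replaceable{cur: &settings{name: "original"}}
	fmt.Println(r.name())
	r.replace(&settings{name: "replacement"})
	fmt.Println(r.name())
}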
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/source.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/source.go
new file mode 100644
index 0000000000..73e44e55b5
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/source.go
@@ -0,0 +1,198 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package sources
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/DataDog/datadog-agent/comp/logs/agent/config"
+ status "github.com/DataDog/datadog-agent/pkg/logs/status/utils"
+ "github.com/DataDog/datadog-agent/pkg/util/statstracker"
+)
+
+// SourceType is used for log line parsing logic.
+// TODO: remove this logic.
+type SourceType string
+
+const (
+ // DockerSourceType docker source type
+ DockerSourceType SourceType = "docker"
+ // KubernetesSourceType kubernetes source type
+ KubernetesSourceType SourceType = "kubernetes"
+)
+
+// LogSource holds a reference to an integration name and a log configuration, and allows tracking errors and
+// successful operations on it. Both name and configuration are static for now and determined at creation time.
+// Changing the status is designed to be thread-safe.
+type LogSource struct {
+ Name string
+ Config *config.LogsConfig
+ Status *status.LogStatus
+ inputs map[string]bool
+ lock *sync.Mutex
+ Messages *config.Messages
+ // sourceType is the type of the source that we are tailing, whereas Config.Type is the type of the tailer
+ // that reads log lines for this source. E.g., a sourceType == containerd and Config.Type == file means that
+ // the agent is tailing a file to read the logs of a containerd container.
+ sourceType SourceType
+ info *status.InfoRegistry
+ // In the case that the source is overridden, keep a reference to the parent for bubbling up information about the child
+ ParentSource *LogSource
+ // LatencyStats tracks internal stats on the time spent by messages from this source in a processing pipeline, i.e.
+ // the duration between when a message is decoded by the tailer/listener/decoder and when the message is handled by a sender
+ LatencyStats *statstracker.Tracker
+ BytesRead *status.CountInfo
+ hiddenFromStatus bool
+}
+
+// NewLogSource creates a new log source.
+func NewLogSource(name string, cfg *config.LogsConfig) *LogSource {
+ source := &LogSource{
+ Name: name,
+ Config: cfg,
+ Status: status.NewLogStatus(),
+ inputs: make(map[string]bool),
+ lock: &sync.Mutex{},
+ Messages: config.NewMessages(),
+ BytesRead: status.NewCountInfo("Bytes Read"),
+ info: status.NewInfoRegistry(),
+ LatencyStats: statstracker.NewTracker(time.Hour*24, time.Hour),
+ hiddenFromStatus: false,
+ }
+ source.RegisterInfo(source.BytesRead)
+ source.RegisterInfo(source.LatencyStats)
+ return source
+}
+
+// AddInput registers an input as being handled by this source.
+func (s *LogSource) AddInput(input string) {
+ s.lock.Lock()
+ s.inputs[input] = true
+ s.lock.Unlock()
+}
+
+// RemoveInput removes an input from this source.
+func (s *LogSource) RemoveInput(input string) {
+ s.lock.Lock()
+ delete(s.inputs, input)
+ s.lock.Unlock()
+}
+
+// GetInputs returns the inputs handled by this source.
+func (s *LogSource) GetInputs() []string {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ inputs := make([]string, 0, len(s.inputs))
+ for input := range s.inputs {
+ inputs = append(inputs, input)
+ }
+ return inputs
+}
+
+// SetSourceType sets a format that gives information on how the source lines should be parsed
+func (s *LogSource) SetSourceType(sourceType SourceType) {
+ s.lock.Lock()
+ s.sourceType = sourceType
+ s.lock.Unlock()
+}
+
+// GetSourceType returns the sourceType used by this source
+func (s *LogSource) GetSourceType() SourceType {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ return s.sourceType
+}
+
+// RegisterInfo registers some info to display on the status page
+func (s *LogSource) RegisterInfo(i status.InfoProvider) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ s.info.Register(i)
+}
+
+// GetInfo gets an InfoProvider instance by the key
+func (s *LogSource) GetInfo(key string) status.InfoProvider {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ return s.info.Get(key)
+}
+
+// GetInfoStatus returns a primitive representation of the info for the status page
+func (s *LogSource) GetInfoStatus() map[string][]string {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ return s.info.Rendered()
+}
+
+// HideFromStatus hides the source from the status output
+func (s *LogSource) HideFromStatus() {
+ s.lock.Lock()
+ s.hiddenFromStatus = true
+ s.lock.Unlock()
+}
+
+// IsHiddenFromStatus returns true if this source should be hidden from the status output
+func (s *LogSource) IsHiddenFromStatus() bool {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ return s.hiddenFromStatus
+}
+
+// RecordBytes reports bytes to the source expvars
+// Since `container_collect_all` reports all docker logs as a single source (even though the source is overridden internally),
+// we need to report the byte count to the parent source used to populate the status page.
+func (s *LogSource) RecordBytes(n int64) {
+ s.BytesRead.Add(n)
+
+ // In some cases like `container_collect_all` we need to report the byte count to the parent source
+ // used to populate the status page.
+ if s.ParentSource != nil {
+ s.ParentSource.BytesRead.Add(n)
+ }
+}
+
+// Dump provides a dump of the LogSource contents, for debugging purposes. If
+// multiline is true, the result contains newlines for readability.
+func (s *LogSource) Dump(multiline bool) string {
+ if s == nil {
+ return "&LogSource(nil)"
+ }
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ var b strings.Builder
+
+ ws := func(fmt string) string {
+ if multiline {
+ return "\n\t" + fmt
+ }
+ return " " + fmt
+ }
+
+ indent := func(dump string) string {
+ if multiline {
+ return strings.ReplaceAll(dump, "\n", "\n\t")
+ }
+ return dump
+ }
+
+ fmt.Fprintf(&b, ws("&LogsSource @ %p = {"), s)
+ fmt.Fprintf(&b, ws("Name: %#v,"), s.Name)
+ fmt.Fprintf(&b, ws("Config: %s,"), indent(s.Config.Dump(multiline)))
+ fmt.Fprintf(&b, ws("Status: %s,"), indent(s.Status.Dump()))
+ fmt.Fprintf(&b, ws("inputs: %#v,"), s.inputs)
+ fmt.Fprintf(&b, ws("Messages: %#v,"), s.Messages.GetMessages())
+ fmt.Fprintf(&b, ws("sourceType: %#v,"), s.sourceType)
+ fmt.Fprintf(&b, ws("info: %#v,"), s.info)
+ fmt.Fprintf(&b, ws("parentSource: %p,"), s.ParentSource)
+ fmt.Fprintf(&b, ws("LatencyStats: %#v,"), s.LatencyStats)
+ fmt.Fprintf(&b, ws("hiddenFromStatus: %t}"), s.hiddenFromStatus)
+ return b.String()
+}
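// A tiny sketch of the byte accounting described for RecordBytes above: a child
// source adds to its own counter and, when it has a parent (as with
// container_collect_all), to the parent's counter as well. The atomic counter is
// a stand-in for status.CountInfo.
package main

import (
	"fmt"
	"sync/atomic"
)

type source struct {
	bytesRead atomic.Int64
	parent    *source
}

func (s *source) recordBytes(n int64) {
	s.bytesRead.Add(n)
	if s.parent != nil {
		// Bubble the count up so the status page attributes it to the parent too.
		s.parent.bytesRead.Add(n)
	}
}

func main() {
	parent := &source{}
	child := &source{parent: parent}
	child.recordBytes(128)
	fmt.Println(child.bytesRead.Load(), parent.bytesRead.Load()) // 128 128
}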
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/sources.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/sources.go
new file mode 100644
index 0000000000..e6f5918c68
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/sources/sources.go
@@ -0,0 +1,192 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package sources
+
+import (
+ "sync"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// LogSources serves as the interface between Schedulers and Launchers, distributing
+// notifications of added/removed LogSources to subscribed Launchers.
+//
+// Each subscription receives its own unbuffered channel for sources, and should
+// consume from the channel quickly to avoid blocking other goroutines. There is
+// no means to unsubscribe.
+//
+// If any sources have been added when GetAddedForType is called, then those sources
+// are immediately sent to the channel.
+//
+// This type is thread-safe, and all of its methods can be called concurrently.
+type LogSources struct {
+ mu sync.Mutex
+ sources []*LogSource
+ added []chan *LogSource
+ addedByType map[string][]chan *LogSource
+ removed []chan *LogSource
+ removedByType map[string][]chan *LogSource
+}
+
+// NewLogSources creates a new log sources.
+func NewLogSources() *LogSources {
+ return &LogSources{
+ addedByType: make(map[string][]chan *LogSource),
+ removedByType: make(map[string][]chan *LogSource),
+ }
+}
+
+// AddSource adds a new source.
+//
+// All of the subscribers registered for this source's type (src.Config.Type) will be
+// notified.
+func (s *LogSources) AddSource(source *LogSource) {
+ log.Tracef("Adding %s", source.Dump(false))
+ s.mu.Lock()
+ s.sources = append(s.sources, source)
+ if source.Config == nil || source.Config.Validate() != nil {
+ s.mu.Unlock()
+ return
+ }
+ streams := s.added
+ streamsForType := s.addedByType[source.Config.Type]
+ s.mu.Unlock()
+
+ for _, stream := range streams {
+ stream <- source
+ }
+
+ for _, stream := range streamsForType {
+ stream <- source
+ }
+}
+
+// RemoveSource removes a source.
+//
+// All of the subscribers registered for this source's type (src.Config.Type) will be
+// notified of its removal.
+func (s *LogSources) RemoveSource(source *LogSource) {
+ log.Tracef("Removing %s", source.Dump(false))
+ s.mu.Lock()
+ var sourceFound bool
+ for i, src := range s.sources {
+ if src == source {
+ s.sources = append(s.sources[:i], s.sources[i+1:]...)
+ sourceFound = true
+ break
+ }
+ }
+ streams := s.removed
+ streamsForType := s.removedByType[source.Config.Type]
+ s.mu.Unlock()
+
+ if sourceFound {
+ for _, stream := range streams {
+ stream <- source
+ }
+ for _, stream := range streamsForType {
+ stream <- source
+ }
+ }
+}
+
+// SubscribeAll returns two channels carrying notifications of all added and
+// removed sources, respectively. This guarantees consistency if sources are
+// added or removed concurrently.
+//
+// Any sources added before this call are delivered from a new goroutine.
+func (s *LogSources) SubscribeAll() (added chan *LogSource, removed chan *LogSource) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ added = make(chan *LogSource)
+ removed = make(chan *LogSource)
+
+ s.added = append(s.added, added)
+ s.removed = append(s.removed, removed)
+
+ existingSources := append([]*LogSource{}, s.sources...) // clone for goroutine
+ go func() {
+ for _, source := range existingSources {
+ added <- source
+ }
+ }()
+
+ return
+}
+
+// SubscribeForType returns two channels carrying notifications of added and
+// removed sources with the given type, respectively. This guarantees
+// consistency if sources are added or removed concurrently.
+//
+// Any sources added before this call are delivered from a new goroutine.
+func (s *LogSources) SubscribeForType(sourceType string) (added chan *LogSource, removed chan *LogSource) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ added = make(chan *LogSource)
+ removed = make(chan *LogSource)
+
+ if _, exists := s.addedByType[sourceType]; !exists {
+ s.addedByType[sourceType] = []chan *LogSource{}
+ }
+ s.addedByType[sourceType] = append(s.addedByType[sourceType], added)
+
+ if _, exists := s.removedByType[sourceType]; !exists {
+ s.removedByType[sourceType] = []chan *LogSource{}
+ }
+ s.removedByType[sourceType] = append(s.removedByType[sourceType], removed)
+
+ existingSources := append([]*LogSource{}, s.sources...) // clone for goroutine
+ go func() {
+ for _, source := range existingSources {
+ if source.Config.Type == sourceType {
+ added <- source
+ }
+ }
+ }()
+
+ return
+}
+
+// GetAddedForType returns a channel carrying notifications of new sources
+// with the given type.
+//
+// Any sources added before this call are delivered from a new goroutine.
+func (s *LogSources) GetAddedForType(sourceType string) chan *LogSource {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ _, exists := s.addedByType[sourceType]
+ if !exists {
+ s.addedByType[sourceType] = []chan *LogSource{}
+ }
+
+ stream := make(chan *LogSource)
+ s.addedByType[sourceType] = append(s.addedByType[sourceType], stream)
+
+ existingSources := append([]*LogSource{}, s.sources...) // clone for goroutine
+ go func() {
+ for _, source := range existingSources {
+ if source.Config.Type == sourceType {
+ stream <- source
+ }
+ }
+ }()
+
+ return stream
+}
+
+// GetSources returns all the sources currently held. The result is a copy and
+// will not be modified after it is returned. However, the set of sources held by the
+// LogSources instance may keep changing (indexes may shift, entries may be added or removed).
+func (s *LogSources) GetSources() []*LogSource {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ clone := append([]*LogSource{}, s.sources...)
+ return clone
+}
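// A simplified sketch of the subscription model documented for LogSources above:
// each subscriber gets an unbuffered channel, and any sources added before the
// subscription are replayed from a new goroutine so the caller can start
// consuming immediately. Plain strings stand in for *LogSource values.
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu      sync.Mutex
	sources []string
	subs    []chan string
}

func (r *registry) add(src string) {
	r.mu.Lock()
	r.sources = append(r.sources, src)
	subs := append([]chan string{}, r.subs...)
	r.mu.Unlock()
	for _, ch := range subs {
		ch <- src // unbuffered: a slow subscriber blocks the publisher
	}
}

func (r *registry) subscribe() chan string {
	r.mu.Lock()
	defer r.mu.Unlock()
	ch := make(chan string)
	r.subs = append(r.subs, ch)
	existing := append([]string{}, r.sources...)
	go func() {
		// Replay sources added before this subscription, as SubscribeAll does.
		for _, src := range existing {
			ch <- src
		}
	}()
	return ch
}

func main() {
	r := &registry{}
	r.add("file:/var/log/app.log")
	ch := r.subscribe()
	fmt.Println(<-ch) // pre-existing source, delivered by the replay goroutine
	go r.add("docker:web")
	fmt.Println(<-ch) // new source, delivered by add
}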
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface/status.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface/status.go
new file mode 100644
index 0000000000..9ae507e56e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface/status.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+// Package statusinterface describes status methods required in logs agent modules
+package statusinterface
+
+// Status is the type for status methods
+type Status interface {
+ // AddGlobalWarning keeps track of a warning message to display on the status.
+ AddGlobalWarning(key string, warning string)
+
+ // RemoveGlobalWarning loses track of a warning message
+ // that does not need to be displayed on the status anymore.
+ RemoveGlobalWarning(key string)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface/status_mock.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface/status_mock.go
new file mode 100644
index 0000000000..751455bb60
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface/status_mock.go
@@ -0,0 +1,22 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package statusinterface
+
+type mockStatusProvider struct{}
+
+// AddGlobalWarning keeps track of a warning message to display on the status.
+func (mp *mockStatusProvider) AddGlobalWarning(string, string) {
+}
+
+// RemoveGlobalWarning loses track of a warning message
+// that does not need to be displayed on the status anymore.
+func (mp *mockStatusProvider) RemoveGlobalWarning(string) {
+}
+
+// NewStatusProviderMock returns a mock instance of statusinterface to be used in tests
+func NewStatusProviderMock() Status {
+ return &mockStatusProvider{}
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/utils/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/utils/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/utils/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/utils/info.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/utils/info.go
new file mode 100644
index 0000000000..c102fddfd2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/utils/info.go
@@ -0,0 +1,167 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//nolint:revive // TODO(AML) Fix revive linter
+package utils
+
+import (
+ "fmt"
+ "sync"
+
+ "go.uber.org/atomic"
+)
+
+// InfoProvider is a general interface to hold and render info for the status page.
+//
+// When implementing InfoProvider - be aware of the 2 ways it is used by the status page:
+//
+// 1. when a single message is returned, the status page will display a single line:
+// InfoKey(): Info()[0]
+//
+// 2. when multiple messages are returned, the status page will display an indented list:
+// InfoKey():
+// Info()[0]
+// Info()[1]
+// Info()[n]
+//
+// InfoKey only needs to be unique per source, and should be human readable.
+type InfoProvider interface {
+ InfoKey() string
+ Info() []string
+}
+
+// CountInfo records a simple count
+type CountInfo struct {
+ count *atomic.Int64
+ key string
+}
+
+// NewCountInfo creates a new CountInfo instance
+func NewCountInfo(key string) *CountInfo {
+ return &CountInfo{
+ count: atomic.NewInt64(0),
+ key: key,
+ }
+}
+
+// Add a new value to the count
+func (c *CountInfo) Add(v int64) {
+ c.count.Add(v)
+}
+
+// Get the underlying value of the count
+func (c *CountInfo) Get() int64 {
+ return c.count.Load()
+}
+
+// InfoKey returns the key
+func (c *CountInfo) InfoKey() string {
+ return c.key
+}
+
+// Info returns the info
+func (c *CountInfo) Info() []string {
+ return []string{fmt.Sprintf("%d", c.count.Load())}
+}
+
+// MappedInfo collects multiple info messages with a unique key
+type MappedInfo struct {
+ lock sync.Mutex
+ key string
+ messages map[string]string
+}
+
+// NewMappedInfo creates a new MappedInfo instance
+func NewMappedInfo(key string) *MappedInfo {
+ return &MappedInfo{
+ key: key,
+ messages: make(map[string]string),
+ }
+}
+
+// SetMessage sets a message with a unique key
+func (m *MappedInfo) SetMessage(key string, message string) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.messages[key] = message
+}
+
+// RemoveMessage removes a message with a unique key
+func (m *MappedInfo) RemoveMessage(key string) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ delete(m.messages, key)
+}
+
+// InfoKey returns the key
+func (m *MappedInfo) InfoKey() string {
+ return m.key
+}
+
+// Info returns the info
+func (m *MappedInfo) Info() []string {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ info := []string{}
+ for _, v := range m.messages {
+ info = append(info, v)
+ }
+ return info
+}
+
+// InfoRegistry keeps track of info providers
+type InfoRegistry struct {
+ lock sync.Mutex
+ info map[string]InfoProvider
+}
+
+// NewInfoRegistry creates a new InfoRegistry instance
+func NewInfoRegistry() *InfoRegistry {
+ return &InfoRegistry{
+ info: make(map[string]InfoProvider),
+ }
+}
+
+// Register adds an info provider
+func (i *InfoRegistry) Register(info InfoProvider) {
+ i.lock.Lock()
+ defer i.lock.Unlock()
+ key := info.InfoKey()
+ i.info[key] = info
+}
+
+// Get returns the provider for a given key, or nil
+func (i *InfoRegistry) Get(key string) InfoProvider {
+ i.lock.Lock()
+ defer i.lock.Unlock()
+ return i.info[key]
+}
+
+// All returns all registered info providers.
+func (i *InfoRegistry) All() []InfoProvider {
+ i.lock.Lock()
+ defer i.lock.Unlock()
+ info := make([]InfoProvider, 0, len(i.info))
+ for _, v := range i.info {
+ info = append(info, v)
+ }
+
+ return info
+}
+
+// Rendered renders the info for display on the status page.
+func (i *InfoRegistry) Rendered() map[string][]string {
+ i.lock.Lock()
+ defer i.lock.Unlock()
+ info := make(map[string][]string)
+
+ for _, v := range i.info {
+ if len(v.Info()) == 0 {
+ continue
+ }
+ info[v.InfoKey()] = v.Info()
+ }
+ return info
+}
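
For reference, a minimal usage sketch of the InfoProvider contract and the registry added above (the import path follows the vendored directory; the keys and messages are illustrative only):

package example

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/logs/status/utils"
)

func renderStatus() {
	registry := utils.NewInfoRegistry()

	// A CountInfo renders as a single line: "Bytes Sent: 42".
	sent := utils.NewCountInfo("Bytes Sent")
	sent.Add(42)
	registry.Register(sent)

	// A MappedInfo renders as an indented list, one line per message key.
	warnings := utils.NewMappedInfo("Warnings")
	warnings.SetMessage("tailer", "file rotated")
	warnings.SetMessage("sender", "endpoint slow to respond")
	registry.Register(warnings)

	for key, lines := range registry.Rendered() {
		fmt.Println(key, lines)
	}
}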
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/utils/status.go b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/utils/status.go
new file mode 100644
index 0000000000..f4c95344a2
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/logs/status/utils/status.go
@@ -0,0 +1,114 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package utils
+
+import (
+ "fmt"
+ "sync"
+)
+
+type status int
+
+const (
+ isPending status = iota
+ isSuccess
+ isError
+)
+
+// LogStatus tracks errors and success.
+type LogStatus struct {
+ status status
+ err string
+ mu *sync.Mutex
+}
+
+// NewLogStatus creates a new log status.
+func NewLogStatus() *LogStatus {
+ return &LogStatus{
+ status: isPending,
+ mu: &sync.Mutex{},
+ }
+}
+
+// Success sets the status to success.
+func (s *LogStatus) Success() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.status = isSuccess
+ s.err = ""
+}
+
+// Error records the given error and invalidates the source.
+func (s *LogStatus) Error(err error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.status = isError
+ s.err = fmt.Sprintf("Error: %s", err.Error())
+}
+
+// IsPending returns whether the current status is not yet determined.
+func (s *LogStatus) IsPending() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.status == isPending
+}
+
+// IsSuccess returns whether the current status is a success.
+func (s *LogStatus) IsSuccess() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.status == isSuccess
+}
+
+// IsError returns whether the current status is an error.
+func (s *LogStatus) IsError() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.status == isError
+}
+
+// GetError returns the error.
+func (s *LogStatus) GetError() string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.err
+}
+
+// Dump provides a single-line dump of the status, for debugging purposes.
+func (s *LogStatus) Dump() string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var status string
+ switch s.status {
+ case isPending:
+ status = "isPending"
+ case isSuccess:
+ status = "isSuccess"
+ case isError:
+ status = "isError"
+ default:
+ status = fmt.Sprintf("%d", s.status)
+ }
+ return fmt.Sprintf("&LogStatus{status: %s, err: %#v}", status, s.err)
+}
+
+// String returns a human readable representation of the status.
+func (s *LogStatus) String() string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ switch s.status {
+ case isPending:
+ return "pending"
+ case isSuccess:
+ return "success"
+ case isError:
+ return "error"
+ default:
+ return fmt.Sprintf("unknown status: %d", s.status)
+ }
+}
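
A short sketch of how a log source might drive the LogStatus defined above; the helper and its caller are hypothetical, only the LogStatus API comes from this file:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/logs/status/utils"
)

// reportStart records the outcome of starting a log source on its LogStatus.
func reportStart(status *utils.LogStatus, err error) {
	if err != nil {
		status.Error(err) // stores "Error: <message>" and flips the status to error
		return
	}
	status.Success() // clears any previous error
}

func main() {
	status := utils.NewLogStatus() // a fresh status starts as pending
	fmt.Println(status)            // "pending"

	reportStart(status, nil)
	fmt.Println(status, status.IsSuccess()) // "success true"
}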
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go
index ab3ce4a8e5..6c490bbcf0 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go
@@ -533,9 +533,7 @@ func stateNul(s *scanner, c byte) int {
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func stateError(s *scanner, c byte) int {
+func stateError(_ *scanner, _ byte) int {
return scanError
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go
index ed30de603b..c244efb14e 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go
@@ -107,6 +107,7 @@ type ObfuscationMode string
// ObfuscationMode valid values
const (
+ NormalizeOnly = ObfuscationMode("normalize_only")
ObfuscateOnly = ObfuscationMode("obfuscate_only")
ObfuscateAndNormalize = ObfuscationMode("obfuscate_and_normalize")
)
@@ -145,12 +146,12 @@ type SQLConfig struct {
// ObfuscationMode specifies the obfuscation mode to use for go-sqllexer pkg.
// When specified, obfuscator will attempt to use go-sqllexer pkg to obfuscate (and normalize) SQL queries.
- // Valid values are "obfuscate_only", "obfuscate_and_normalize"
+ // Valid values are "normalize_only", "obfuscate_only", "obfuscate_and_normalize"
ObfuscationMode ObfuscationMode `json:"obfuscation_mode" yaml:"obfuscation_mode"`
// RemoveSpaceBetweenParentheses specifies whether to remove spaces between parentheses.
// By default, spaces are inserted between parentheses during normalization.
- // This option is only valid when ObfuscationMode is "obfuscate_and_normalize".
+ // This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize".
RemoveSpaceBetweenParentheses bool `json:"remove_space_between_parentheses" yaml:"remove_space_between_parentheses"`
// KeepNull specifies whether to disable obfuscate NULL value with ?.
@@ -167,12 +168,12 @@ type SQLConfig struct {
// KeepTrailingSemicolon specifies whether to keep trailing semicolon.
// By default, trailing semicolon is removed during normalization.
- // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize".
+ // This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize".
KeepTrailingSemicolon bool `json:"keep_trailing_semicolon" yaml:"keep_trailing_semicolon"`
// KeepIdentifierQuotation specifies whether to keep identifier quotation, e.g. "my_table" or [my_table].
// By default, identifier quotation is removed during normalization.
- // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize".
+ // This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize".
KeepIdentifierQuotation bool `json:"keep_identifier_quotation" yaml:"keep_identifier_quotation"`
// Cache reports whether the obfuscator should use a LRU look-up cache for SQL obfuscations.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go
index e9700c40ab..70a1323eef 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go
@@ -245,10 +245,8 @@ func obfuscateRedisCmd(out *strings.Builder, cmd string, args ...string) {
out.WriteString(strings.Join(args, " "))
}
-// removeAllRedisArgs will take in a command and obfuscate all arguments following
+// RemoveAllRedisArgs will take in a command and obfuscate all arguments following
// the command, regardless of if the command is valid Redis or not
-//
-//nolint:revive // TODO(APM) Fix revive linter
func (*Obfuscator) RemoveAllRedisArgs(rediscmd string) string {
fullCmd := strings.Fields(rediscmd)
if len(fullCmd) == 0 {
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go
index 807c9fb5f6..18fc120a73 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go
@@ -426,17 +426,22 @@ func (o *Obfuscator) ObfuscateSQLExecPlan(jsonPlan string, normalize bool) (stri
// ObfuscateWithSQLLexer obfuscates the given SQL query using the go-sqllexer package.
// If ObfuscationMode is set to ObfuscateOnly, the query will be obfuscated without normalizing it.
func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*ObfuscatedQuery, error) {
- if opts.ObfuscationMode != ObfuscateOnly && opts.ObfuscationMode != ObfuscateAndNormalize {
+ if opts.ObfuscationMode != NormalizeOnly && opts.ObfuscationMode != ObfuscateOnly && opts.ObfuscationMode != ObfuscateAndNormalize {
return nil, fmt.Errorf("invalid obfuscation mode: %s", opts.ObfuscationMode)
}
- obfuscator := sqllexer.NewObfuscator(
- sqllexer.WithReplaceDigits(opts.ReplaceDigits),
- sqllexer.WithDollarQuotedFunc(opts.DollarQuotedFunc),
- sqllexer.WithReplacePositionalParameter(!opts.KeepPositionalParameter),
- sqllexer.WithReplaceBoolean(!opts.KeepBoolean),
- sqllexer.WithReplaceNull(!opts.KeepNull),
- )
+ var obfuscator *sqllexer.Obfuscator
+
+ if opts.ObfuscationMode == ObfuscateOnly || opts.ObfuscationMode == ObfuscateAndNormalize {
+ obfuscator = sqllexer.NewObfuscator(
+ sqllexer.WithReplaceDigits(opts.ReplaceDigits),
+ sqllexer.WithDollarQuotedFunc(opts.DollarQuotedFunc),
+ sqllexer.WithReplacePositionalParameter(!opts.KeepPositionalParameter),
+ sqllexer.WithReplaceBoolean(!opts.KeepBoolean),
+ sqllexer.WithReplaceNull(!opts.KeepNull),
+ )
+ }
+
if opts.ObfuscationMode == ObfuscateOnly {
// Obfuscate the query without normalizing it.
out := obfuscator.Obfuscate(in, sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS)))
@@ -461,12 +466,22 @@ func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*Obfusca
sqllexer.WithKeepTrailingSemicolon(opts.KeepTrailingSemicolon),
sqllexer.WithKeepIdentifierQuotation(opts.KeepIdentifierQuotation),
)
- out, statementMetadata, err := sqllexer.ObfuscateAndNormalize(
- in,
- obfuscator,
- normalizer,
- sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS)),
- )
+
+ var out string
+ var statementMetadata *sqllexer.StatementMetadata
+ var err error
+
+ if opts.ObfuscationMode == NormalizeOnly {
+ // Normalize the query without obfuscating it.
+ out, statementMetadata, err = normalizer.Normalize(in, sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS)))
+ } else {
+ out, statementMetadata, err = sqllexer.ObfuscateAndNormalize(
+ in,
+ obfuscator,
+ normalizer,
+ sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS)),
+ )
+ }
if err != nil {
return nil, err
}
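
A hedged sketch of selecting the new normalize_only mode from caller code. It assumes an already constructed *obfuscate.Obfuscator and that ObfuscatedQuery exposes the resulting text as Query (that field is not part of this diff); the option values are illustrative:

package example

import (
	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

// normalizeOnly normalizes a SQL statement without obfuscating its literals,
// using the normalize_only mode introduced above.
func normalizeOnly(o *obfuscate.Obfuscator, query string) (string, error) {
	cfg := &obfuscate.SQLConfig{
		ObfuscationMode:         obfuscate.NormalizeOnly,
		KeepTrailingSemicolon:   true, // honored in normalize_only mode
		KeepIdentifierQuotation: true, // honored in normalize_only mode
	}
	oq, err := o.ObfuscateWithSQLLexer(query, cfg)
	if err != nil {
		return "", err
	}
	return oq.Query, nil // assumption: ObfuscatedQuery carries the text in Query
}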
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go
index 9ab9928817..e379d7dde4 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go
@@ -610,9 +610,9 @@ func (tkn *SQLTokenizer) scanIdentifier() (TokenKind, []byte) {
return ID, t
}
-//nolint:revive // TODO(APM) Fix revive linter
-func (tkn *SQLTokenizer) scanVariableIdentifier(prefix rune) (TokenKind, []byte) {
+func (tkn *SQLTokenizer) scanVariableIdentifier(_ rune) (TokenKind, []byte) {
for tkn.advance(); tkn.lastChar != ')' && tkn.lastChar != EndChar; tkn.advance() {
+ continue
}
tkn.advance()
if !isLetter(tkn.lastChar) {
@@ -623,8 +623,7 @@ func (tkn *SQLTokenizer) scanVariableIdentifier(prefix rune) (TokenKind, []byte)
return Variable, tkn.bytes()
}
-//nolint:revive // TODO(APM) Fix revive linter
-func (tkn *SQLTokenizer) scanFormatParameter(prefix rune) (TokenKind, []byte) {
+func (tkn *SQLTokenizer) scanFormatParameter(_ rune) (TokenKind, []byte) {
tkn.advance()
return Variable, tkn.bytes()
}
@@ -677,8 +676,7 @@ func (tkn *SQLTokenizer) scanDollarQuotedString() (TokenKind, []byte) {
return DollarQuotedString, buf.Bytes()
}
-//nolint:revive // TODO(APM) Fix revive linter
-func (tkn *SQLTokenizer) scanPreparedStatement(prefix rune) (TokenKind, []byte) {
+func (tkn *SQLTokenizer) scanPreparedStatement(_ rune) (TokenKind, []byte) {
// a prepared statement expect a digit identifier like $1
if !isDigit(tkn.lastChar) {
tkn.setErr(`prepared statements must start with digits, got "%c" (%d)`, tkn.lastChar, tkn.lastChar)
@@ -695,8 +693,7 @@ func (tkn *SQLTokenizer) scanPreparedStatement(prefix rune) (TokenKind, []byte)
return PreparedStatement, buff
}
-//nolint:revive // TODO(APM) Fix revive linter
-func (tkn *SQLTokenizer) scanEscapeSequence(braces rune) (TokenKind, []byte) {
+func (tkn *SQLTokenizer) scanEscapeSequence(_ rune) (TokenKind, []byte) {
for tkn.lastChar != '}' && tkn.lastChar != EndChar {
tkn.advance()
}
@@ -825,8 +822,7 @@ func (tkn *SQLTokenizer) scanString(delim rune, kind TokenKind) (TokenKind, []by
return kind, buf.Bytes()
}
-//nolint:revive // TODO(APM) Fix revive linter
-func (tkn *SQLTokenizer) scanCommentType1(prefix string) (TokenKind, []byte) {
+func (tkn *SQLTokenizer) scanCommentType1(_ string) (TokenKind, []byte) {
for tkn.lastChar != EndChar {
if tkn.lastChar == '\n' {
tkn.advance()
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go
index b3ffa3ab12..f1d0275172 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.30.0
-// protoc v4.23.4
+// protoc v4.25.3
// source: datadog/trace/stats.proto
package trace
@@ -20,6 +20,55 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
+type TraceRootFlag int32
+
+const (
+ TraceRootFlag_NOT_SET TraceRootFlag = 0
+ TraceRootFlag_TRUE TraceRootFlag = 1
+ TraceRootFlag_FALSE TraceRootFlag = 2
+)
+
+// Enum value maps for TraceRootFlag.
+var (
+ TraceRootFlag_name = map[int32]string{
+ 0: "NOT_SET",
+ 1: "TRUE",
+ 2: "FALSE",
+ }
+ TraceRootFlag_value = map[string]int32{
+ "NOT_SET": 0,
+ "TRUE": 1,
+ "FALSE": 2,
+ }
+)
+
+func (x TraceRootFlag) Enum() *TraceRootFlag {
+ p := new(TraceRootFlag)
+ *p = x
+ return p
+}
+
+func (x TraceRootFlag) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (TraceRootFlag) Descriptor() protoreflect.EnumDescriptor {
+ return file_datadog_trace_stats_proto_enumTypes[0].Descriptor()
+}
+
+func (TraceRootFlag) Type() protoreflect.EnumType {
+ return &file_datadog_trace_stats_proto_enumTypes[0]
+}
+
+func (x TraceRootFlag) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use TraceRootFlag.Descriptor instead.
+func (TraceRootFlag) EnumDescriptor() ([]byte, []int) {
+ return file_datadog_trace_stats_proto_rawDescGZIP(), []int{0}
+}
+
// StatsPayload is the payload used to send stats from the agent to the backend.
type StatsPayload struct {
state protoimpl.MessageState
@@ -374,7 +423,8 @@ type ClientGroupedStats struct {
SpanKind string `protobuf:"bytes,15,opt,name=span_kind,json=spanKind,proto3" json:"span_kind,omitempty"` // value of the span.kind tag on the span
// peer_tags are supplementary tags that further describe a peer entity
// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the name of peer DB
- PeerTags []string `protobuf:"bytes,16,rep,name=peer_tags,json=peerTags,proto3" json:"peer_tags,omitempty"`
+ PeerTags []string `protobuf:"bytes,16,rep,name=peer_tags,json=peerTags,proto3" json:"peer_tags,omitempty"`
+ IsTraceRoot TraceRootFlag `protobuf:"varint,17,opt,name=is_trace_root,json=isTraceRoot,proto3,enum=datadog.trace.TraceRootFlag" json:"is_trace_root,omitempty"` // this field's value is equal to span's ParentID == 0.
}
func (x *ClientGroupedStats) Reset() {
@@ -514,6 +564,13 @@ func (x *ClientGroupedStats) GetPeerTags() []string {
return nil
}
+func (x *ClientGroupedStats) GetIsTraceRoot() TraceRootFlag {
+ if x != nil {
+ return x.IsTraceRoot
+ }
+ return TraceRootFlag_NOT_SET
+}
+
var File_datadog_trace_stats_proto protoreflect.FileDescriptor
var file_datadog_trace_stats_proto_rawDesc = []byte{
@@ -574,7 +631,7 @@ var file_datadog_trace_stats_proto_rawDesc = []byte{
0x6f, 0x75, 0x70, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74,
0x73, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68,
0x69, 0x66, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x61, 0x67, 0x65, 0x6e, 0x74,
- 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74, 0x22, 0xc3, 0x03, 0x0a, 0x12, 0x43, 0x6c,
+ 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74, 0x22, 0x85, 0x04, 0x0a, 0x12, 0x43, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73,
0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
@@ -602,9 +659,17 @@ var file_datadog_trace_stats_proto_rawDesc = []byte{
0x1b, 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x0f, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x09,
0x70, 0x65, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x08, 0x70, 0x65, 0x65, 0x72, 0x54, 0x61, 0x67, 0x73, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x42,
- 0x16, 0x5a, 0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67,
- 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x08, 0x70, 0x65, 0x65, 0x72, 0x54, 0x61, 0x67, 0x73, 0x12, 0x40, 0x0a, 0x0d, 0x69, 0x73, 0x5f,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x1c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x52, 0x0b,
+ 0x69, 0x73, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x0e, 0x10,
+ 0x0f, 0x2a, 0x31, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x46, 0x6c,
+ 0x61, 0x67, 0x12, 0x0b, 0x0a, 0x07, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12,
+ 0x08, 0x0a, 0x04, 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x41, 0x4c,
+ 0x53, 0x45, 0x10, 0x02, 0x42, 0x16, 0x5a, 0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -619,22 +684,25 @@ func file_datadog_trace_stats_proto_rawDescGZIP() []byte {
return file_datadog_trace_stats_proto_rawDescData
}
+var file_datadog_trace_stats_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_datadog_trace_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_datadog_trace_stats_proto_goTypes = []interface{}{
- (*StatsPayload)(nil), // 0: datadog.trace.StatsPayload
- (*ClientStatsPayload)(nil), // 1: datadog.trace.ClientStatsPayload
- (*ClientStatsBucket)(nil), // 2: datadog.trace.ClientStatsBucket
- (*ClientGroupedStats)(nil), // 3: datadog.trace.ClientGroupedStats
+ (TraceRootFlag)(0), // 0: datadog.trace.TraceRootFlag
+ (*StatsPayload)(nil), // 1: datadog.trace.StatsPayload
+ (*ClientStatsPayload)(nil), // 2: datadog.trace.ClientStatsPayload
+ (*ClientStatsBucket)(nil), // 3: datadog.trace.ClientStatsBucket
+ (*ClientGroupedStats)(nil), // 4: datadog.trace.ClientGroupedStats
}
var file_datadog_trace_stats_proto_depIdxs = []int32{
- 1, // 0: datadog.trace.StatsPayload.stats:type_name -> datadog.trace.ClientStatsPayload
- 2, // 1: datadog.trace.ClientStatsPayload.stats:type_name -> datadog.trace.ClientStatsBucket
- 3, // 2: datadog.trace.ClientStatsBucket.stats:type_name -> datadog.trace.ClientGroupedStats
- 3, // [3:3] is the sub-list for method output_type
- 3, // [3:3] is the sub-list for method input_type
- 3, // [3:3] is the sub-list for extension type_name
- 3, // [3:3] is the sub-list for extension extendee
- 0, // [0:3] is the sub-list for field type_name
+ 2, // 0: datadog.trace.StatsPayload.stats:type_name -> datadog.trace.ClientStatsPayload
+ 3, // 1: datadog.trace.ClientStatsPayload.stats:type_name -> datadog.trace.ClientStatsBucket
+ 4, // 2: datadog.trace.ClientStatsBucket.stats:type_name -> datadog.trace.ClientGroupedStats
+ 0, // 3: datadog.trace.ClientGroupedStats.is_trace_root:type_name -> datadog.trace.TraceRootFlag
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
}
func init() { file_datadog_trace_stats_proto_init() }
@@ -697,13 +765,14 @@ func file_datadog_trace_stats_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_datadog_trace_stats_proto_rawDesc,
- NumEnums: 0,
+ NumEnums: 1,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_datadog_trace_stats_proto_goTypes,
DependencyIndexes: file_datadog_trace_stats_proto_depIdxs,
+ EnumInfos: file_datadog_trace_stats_proto_enumTypes,
MessageInfos: file_datadog_trace_stats_proto_msgTypes,
}.Build()
File_datadog_trace_stats_proto = out.File
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go
index 2975ea578f..2f594f9f81 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_gen.go
@@ -127,6 +127,16 @@ func (z *ClientGroupedStats) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
}
+ case "IsTraceRoot":
+ {
+ var zb0003 int32
+ zb0003, err = dc.ReadInt32()
+ if err != nil {
+ err = msgp.WrapError(err, "IsTraceRoot")
+ return
+ }
+ z.IsTraceRoot = TraceRootFlag(zb0003)
+ }
default:
err = dc.Skip()
if err != nil {
@@ -140,9 +150,9 @@ func (z *ClientGroupedStats) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *ClientGroupedStats) EncodeMsg(en *msgp.Writer) (err error) {
- // map header, size 15
+ // map header, size 16
// write "Service"
- err = en.Append(0x8f, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
+ err = en.Append(0xde, 0x0, 0x10, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
if err != nil {
return
}
@@ -298,15 +308,25 @@ func (z *ClientGroupedStats) EncodeMsg(en *msgp.Writer) (err error) {
return
}
}
+ // write "IsTraceRoot"
+ err = en.Append(0xab, 0x49, 0x73, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt32(int32(z.IsTraceRoot))
+ if err != nil {
+ err = msgp.WrapError(err, "IsTraceRoot")
+ return
+ }
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ClientGroupedStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
- // map header, size 15
+ // map header, size 16
// string "Service"
- o = append(o, 0x8f, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
+ o = append(o, 0xde, 0x0, 0x10, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
o = msgp.AppendString(o, z.Service)
// string "Name"
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
@@ -353,6 +373,9 @@ func (z *ClientGroupedStats) MarshalMsg(b []byte) (o []byte, err error) {
for za0001 := range z.PeerTags {
o = msgp.AppendString(o, z.PeerTags[za0001])
}
+ // string "IsTraceRoot"
+ o = append(o, 0xab, 0x49, 0x73, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74)
+ o = msgp.AppendInt32(o, int32(z.IsTraceRoot))
return
}
@@ -477,6 +500,16 @@ func (z *ClientGroupedStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
}
+ case "IsTraceRoot":
+ {
+ var zb0003 int32
+ zb0003, bts, err = msgp.ReadInt32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "IsTraceRoot")
+ return
+ }
+ z.IsTraceRoot = TraceRootFlag(zb0003)
+ }
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -491,10 +524,11 @@ func (z *ClientGroupedStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ClientGroupedStats) Msgsize() (s int) {
- s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size + 9 + msgp.StringPrefixSize + len(z.SpanKind) + 9 + msgp.ArrayHeaderSize
+ s = 3 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size + 9 + msgp.StringPrefixSize + len(z.SpanKind) + 9 + msgp.ArrayHeaderSize
for za0001 := range z.PeerTags {
s += msgp.StringPrefixSize + len(z.PeerTags[za0001])
}
+ s += 12 + msgp.Int32Size
return
}
@@ -1704,3 +1738,55 @@ func (z *StatsPayload) Msgsize() (s int) {
s += 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 15 + msgp.BoolSize + 13 + msgp.BoolSize
return
}
+
+// DecodeMsg implements msgp.Decodable
+func (z *TraceRootFlag) DecodeMsg(dc *msgp.Reader) (err error) {
+ {
+ var zb0001 int32
+ zb0001, err = dc.ReadInt32()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = TraceRootFlag(zb0001)
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z TraceRootFlag) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteInt32(int32(z))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z TraceRootFlag) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendInt32(o, int32(z))
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *TraceRootFlag) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zb0001 int32
+ zb0001, bts, err = msgp.ReadInt32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = TraceRootFlag(zb0001)
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z TraceRootFlag) Msgsize() (s int) {
+ s = msgp.Int32Size
+ return
+}
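
To illustrate the new field end to end, a small round-trip sketch through the msgp codec generated above (the import path follows the vendored directory; the values are illustrative):

package example

import (
	"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
)

// roundTrip encodes and decodes ClientGroupedStats carrying the new
// IsTraceRoot field (msgp key "IsTraceRoot", protobuf field 17).
func roundTrip() (trace.TraceRootFlag, error) {
	in := &trace.ClientGroupedStats{
		Service:     "web-store",              // illustrative
		IsTraceRoot: trace.TraceRootFlag_TRUE, // the span had ParentID == 0
	}
	buf, err := in.MarshalMsg(nil)
	if err != nil {
		return trace.TraceRootFlag_NOT_SET, err
	}
	out := &trace.ClientGroupedStats{}
	if _, err := out.UnmarshalMsg(buf); err != nil {
		return trace.TraceRootFlag_NOT_SET, err
	}
	return out.IsTraceRoot, nil // TraceRootFlag_TRUE
}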
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go
index c29eadf308..16eefd4d30 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace/stats_vtproto.pb.go
@@ -329,6 +329,13 @@ func (m *ClientGroupedStats) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.IsTraceRoot != 0 {
+ i = encodeVarint(dAtA, i, uint64(m.IsTraceRoot))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x88
+ }
if len(m.PeerTags) > 0 {
for iNdEx := len(m.PeerTags) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.PeerTags[iNdEx])
@@ -624,6 +631,9 @@ func (m *ClientGroupedStats) SizeVT() (n int) {
n += 2 + l + sov(uint64(l))
}
}
+ if m.IsTraceRoot != 0 {
+ n += 2 + sov(uint64(m.IsTraceRoot))
+ }
n += len(m.unknownFields)
return n
}
@@ -1915,6 +1925,25 @@ func (m *ClientGroupedStats) UnmarshalVT(dAtA []byte) error {
}
m.PeerTags = append(m.PeerTags, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsTraceRoot", wireType)
+ }
+ m.IsTraceRoot = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.IsTraceRoot |= TraceRootFlag(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go
index f6e29072b0..fa2bd77d29 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go
@@ -17,11 +17,14 @@ var validProducts = map[string]struct{}{
ProductCWSDD: {},
ProductCWSCustom: {},
ProductCWSProfiles: {},
+ ProductCSMSideScanning: {},
ProductASM: {},
ProductASMFeatures: {},
ProductASMDD: {},
ProductASMData: {},
ProductAPMTracing: {},
+ ProductSDSRules: {},
+ ProductSDSAgentConfig: {},
ProductLiveDebugging: {},
ProductTesting1: {},
ProductTesting2: {},
@@ -50,6 +53,8 @@ const (
ProductCWSCustom = "CWS_CUSTOM"
// ProductCWSProfiles is the cloud workload security profile product
ProductCWSProfiles = "CWS_SECURITY_PROFILES"
+ // ProductCSMSideScanning is the side scanning product
+ ProductCSMSideScanning = "CSM_SIDE_SCANNING"
// ProductASM is the ASM product used by customers to issue rules configurations
ProductASM = "ASM"
// ProductASMFeatures is the ASM product used form ASM activation through remote config
@@ -60,6 +65,10 @@ const (
ProductASMData = "ASM_DATA"
// ProductAPMTracing is the apm tracing product
ProductAPMTracing = "APM_TRACING"
+ // ProductSDSRules is the SDS definitions product
+ ProductSDSRules = "SDS_RULES_DD"
+ // ProductSDSAgentConfig is the user SDS configurations product.
+ ProductSDSAgentConfig = "SDS_AGENT_CONFIG"
// ProductLiveDebugging is the dynamic instrumentation product
ProductLiveDebugging = "LIVE_DEBUGGING"
// ProductTesting1 is a product used for testing remote config
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/status/health/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/status/health/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/status/health/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/status/health/README.md b/vendor/github.com/DataDog/datadog-agent/pkg/status/health/README.md
new file mode 100644
index 0000000000..245cc52b00
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/status/health/README.md
@@ -0,0 +1,30 @@
+## package `health`
+
+The `health` package handles internal healthchecks for the agents, which allow checking
+that every asynchronous component is running as intended.
+
+For more information on the context, see the `agent-healthcheck.md` proposal.
+
+### How to add a component?
+
+- First, you need to register by calling `health.RegisterLiveness` (or `health.RegisterReadiness`)
+with a user-visible name. You will receive a `*health.Handle` to keep. As soon as you have
+registered, you need to start reading the channel to be considered healthy.
+
+- In your main goroutine, you need to read from the `handle.C` channel at least every 15 seconds.
+This is usually done with a `select` statement in that goroutine's main loop (a sketch follows
+below). If the channel is full (after two missed reads), your component will be considered
+unhealthy, which might result in the agent getting killed by the system.
+
+- If your component is stopping, it should call `handle.Deregister()` before stopping. It will
+then be removed from the healthcheck system.
+
+### Where should I tick?
+
+It depends on your component lifecycle, but the check's purpose is to verify that your component
+is able to process new input and act accordingly. For components that read input from a channel,
+you should read `handle.C` from the same loop that reads that input.
+
+Such a freeze is usually highly improbable, but that is exactly the scope of this system: detecting
+whether a component is frozen because of a bug or race condition. This is usually the only
+kind of issue that can be solved by restarting the agent.
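
A hedged sketch of the loop described in this README, using the `RegisterLiveness` helper that `global.go` (added later in this diff) exposes; the worker name and `process` function are illustrative:

package example

import (
	"github.com/DataDog/datadog-agent/pkg/status/health"
)

// runWorker reads its input channel and ticks the health handle from the same
// select loop, so a frozen worker stops reading handle.C and is reported unhealthy.
func runWorker(input <-chan string, stop <-chan struct{}) {
	handle := health.RegisterLiveness("my-worker") // user-visible name

	for {
		select {
		case <-handle.C:
			// Reading the channel is what keeps this component marked healthy.
		case msg := <-input:
			process(msg)
		case <-stop:
			_ = handle.Deregister() // remove the component before stopping
			return
		}
	}
}

func process(string) {}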
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/status/health/global.go b/vendor/github.com/DataDog/datadog-agent/pkg/status/health/global.go
new file mode 100644
index 0000000000..e6ce74c5b8
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/status/health/global.go
@@ -0,0 +1,74 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package health
+
+import (
+ "errors"
+ "time"
+)
+
+var readinessAndLivenessCatalog = newCatalog()
+var readinessOnlyCatalog = newCatalog()
+
+// RegisterReadiness registers a component for readiness check with the default 30 seconds timeout, returns a token
+func RegisterReadiness(name string) *Handle {
+ return readinessOnlyCatalog.register(name)
+}
+
+// RegisterLiveness registers a component for liveness check with the default 30 seconds timeout, returns a token
+func RegisterLiveness(name string) *Handle {
+ return readinessAndLivenessCatalog.register(name)
+}
+
+// Deregister a component from the healthcheck
+func Deregister(handle *Handle) error {
+ if readinessAndLivenessCatalog.deregister(handle) == nil {
+ return nil
+ }
+ return readinessOnlyCatalog.deregister(handle)
+}
+
+// GetLive returns health of all components registered for liveness
+func GetLive() Status {
+ return readinessAndLivenessCatalog.getStatus()
+}
+
+// GetReady returns health of all components registered for both readiness and liveness
+func GetReady() (ret Status) {
+ liveStatus := readinessAndLivenessCatalog.getStatus()
+ readyStatus := readinessOnlyCatalog.getStatus()
+ ret.Healthy = append(liveStatus.Healthy, readyStatus.Healthy...)
+ ret.Unhealthy = append(liveStatus.Unhealthy, readyStatus.Unhealthy...)
+ return
+}
+
+// getStatusNonBlocking queries the health status of the agent
+// and is guaranteed to return under 500ms.
+func getStatusNonBlocking(getStatus func() Status) (Status, error) {
+ // Run the health status in a goroutine
+ ch := make(chan Status, 1)
+ go func() {
+ ch <- getStatus()
+ }()
+
+ // Only wait 500ms before returning
+ select {
+ case status := <-ch:
+ return status, nil
+ case <-time.After(500 * time.Millisecond):
+ return Status{}, errors.New("timeout when getting health status")
+ }
+}
+
+// GetLiveNonBlocking returns the health of all components registered for liveness with a 500ms timeout
+func GetLiveNonBlocking() (Status, error) {
+ return getStatusNonBlocking(GetLive)
+}
+
+// GetReadyNonBlocking returns the health of all components registered for both readiness and liveness with a 500ms timeout
+func GetReadyNonBlocking() (Status, error) {
+ return getStatusNonBlocking(GetReady)
+}
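
As a usage note, the non-blocking getters above are meant for probe-style callers. A hedged sketch of an HTTP liveness handler built on GetLiveNonBlocking (the handler itself is illustrative, not part of the agent):

package example

import (
	"net/http"

	"github.com/DataDog/datadog-agent/pkg/status/health"
)

// livenessHandler answers probes from the aggregated liveness status; it uses
// GetLiveNonBlocking so a stuck health catalog cannot hang the probe beyond ~500ms.
func livenessHandler(w http.ResponseWriter, _ *http.Request) {
	status, err := health.GetLiveNonBlocking()
	if err != nil || len(status.Unhealthy) > 0 {
		http.Error(w, "unhealthy", http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
}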
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/status/health/health.go b/vendor/github.com/DataDog/datadog-agent/pkg/status/health/health.go
new file mode 100644
index 0000000000..4540034547
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/status/health/health.go
@@ -0,0 +1,158 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package health implements the internal healthcheck
+package health
+
+import (
+ "errors"
+ "sync"
+ "time"
+)
+
+var pingFrequency = 15 * time.Second
+var bufferSize = 2
+
+// Handle holds the token and the channel for components to use
+type Handle struct {
+ C <-chan time.Time
+}
+
+// Deregister allows a component to easily deregister itself
+func (h *Handle) Deregister() error {
+ return Deregister(h)
+}
+
+type component struct {
+ name string
+ healthChan chan time.Time
+ healthy bool
+}
+
+type catalog struct {
+ sync.RWMutex
+ components map[*Handle]*component
+ latestRun time.Time
+}
+
+func newCatalog() *catalog {
+ return &catalog{
+ components: make(map[*Handle]*component),
+ latestRun: time.Now(), // Start healthy
+ }
+}
+
+// register a component with the default 30 seconds timeout, returns a token
+func (c *catalog) register(name string) *Handle {
+ c.Lock()
+ defer c.Unlock()
+
+ if len(c.components) == 0 {
+ go c.run()
+ }
+
+ component := &component{
+ name: name,
+ healthChan: make(chan time.Time, bufferSize),
+ healthy: false,
+ }
+ h := &Handle{
+ C: component.healthChan,
+ }
+
+ // Start with a full channel so the component is unhealthy until its first read
+ for i := 0; i < bufferSize; i++ {
+ component.healthChan <- time.Now().Add(pingFrequency)
+ }
+
+ c.components[h] = component
+ return h
+}
+
+// run is the healthcheck goroutine that triggers a ping every 15 sec
+// it must be started when the first component registers, and will
+// return if no components are registered anymore
+func (c *catalog) run() {
+ pingTicker := time.NewTicker(pingFrequency)
+
+ for {
+ t := <-pingTicker.C
+ empty := c.pingComponents(t.Add(mulDuration(pingFrequency, bufferSize)))
+ if empty {
+ break
+ }
+ }
+ pingTicker.Stop()
+}
+
+func mulDuration(d time.Duration, x int) time.Duration {
+ return time.Duration(int64(d) * int64(x))
+}
+
+// pingComponents is the actual pinging logic, separated for unit tests.
+// Returns true if the component list is empty, to make the polling logic stop.
+func (c *catalog) pingComponents(healthDeadline time.Time) bool {
+ c.Lock()
+ defer c.Unlock()
+ for _, component := range c.components {
+ select {
+ case component.healthChan <- healthDeadline:
+ component.healthy = true
+ default:
+ component.healthy = false
+ }
+ }
+ c.latestRun = time.Now()
+ return len(c.components) == 0
+}
+
+// deregister a component from the healthcheck
+func (c *catalog) deregister(handle *Handle) error {
+ c.Lock()
+ defer c.Unlock()
+ if _, found := c.components[handle]; !found {
+ return errors.New("component not registered")
+ }
+ close(c.components[handle].healthChan)
+ delete(c.components, handle)
+ return nil
+}
+
+// Status represents the current status of registered components
+// it is built and returned by GetStatus()
+type Status struct {
+ Healthy []string
+ Unhealthy []string
+}
+
+// getStatus queries the health status of the agent
+func (c *catalog) getStatus() Status {
+ status := Status{}
+ c.RLock()
+ defer c.RUnlock()
+
+ // If no component is registered, do not check anything, not even the checker itself,
+ // as the `run()` function exits in such a case.
+ if len(c.components) == 0 {
+ return status
+ }
+
+ // Test the checker itself
+ if time.Now().After(c.latestRun.Add(2 * pingFrequency)) {
+ status.Unhealthy = append(status.Unhealthy, "healthcheck")
+ } else {
+ status.Healthy = append(status.Healthy, "healthcheck")
+ }
+
+ // Check components
+ for _, component := range c.components {
+ if component.healthy {
+ status.Healthy = append(status.Healthy, component.name)
+ } else {
+ status.Unhealthy = append(status.Unhealthy, component.name)
+ }
+ }
+ return status
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/counter.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/counter.go
new file mode 100644
index 0000000000..0a793d3055
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/counter.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+import (
+ telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry"
+)
+
+// Counter tracks how many times something is happening.
+type Counter interface {
+ telemetryComponent.Counter
+}
+
+// NewCounter creates a Counter with default options for telemetry purpose.
+// Current implementation used: Prometheus Counter
+func NewCounter(subsystem, name string, tags []string, help string) Counter {
+ return NewCounterWithOpts(subsystem, name, tags, help, DefaultOptions)
+}
+
+// NewCounterWithOpts creates a Counter with the given options for telemetry purpose.
+// See NewCounter()
+func NewCounterWithOpts(subsystem, name string, tags []string, help string, opts Options) Counter {
+ return GetCompatComponent().NewCounterWithOpts(subsystem, name, tags, help, telemetryComponent.Options(opts))
+}
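A rough usage sketch of the counter helpers above; the subsystem, metric name, and tag key are invented for illustration, and Inc with tag values is assumed from the embedded comp/core/telemetry Counter:

import "github.com/DataDog/datadog-agent/pkg/telemetry"

// Illustrative metric, declared once at package level.
var tracesReceived = telemetry.NewCounter(
	"receiver",        // subsystem
	"traces_received", // metric name
	[]string{"lang"},  // tag keys
	"Number of traces received, by client language.",
)

func recordTrace(lang string) {
	tracesReceived.Inc(lang) // tag values follow the order of the tag keys
}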
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/docs.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/docs.go
new file mode 100644
index 0000000000..686564ca9e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/docs.go
@@ -0,0 +1,7 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package telemetry defines the agent internal telemetry
+package telemetry
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/gauge.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/gauge.go
new file mode 100644
index 0000000000..1327cc5edd
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/gauge.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+import (
+ telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry"
+)
+
+// Gauge tracks the value of one health metric of the Agent.
+type Gauge interface {
+ telemetryComponent.Gauge
+}
+
+// NewGauge creates a Gauge with default options for telemetry purpose.
+// Current implementation used: Prometheus Gauge
+func NewGauge(subsystem, name string, tags []string, help string) Gauge {
+ return NewGaugeWithOpts(subsystem, name, tags, help, DefaultOptions)
+}
+
+// NewGaugeWithOpts creates a Gauge with the given options for telemetry purpose.
+// See NewGauge()
+func NewGaugeWithOpts(subsystem, name string, tags []string, help string, opts Options) Gauge {
+ return GetCompatComponent().NewGaugeWithOpts(subsystem, name, tags, help, telemetryComponent.Options(opts))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/histogram.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/histogram.go
new file mode 100644
index 0000000000..a35d3564ec
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/histogram.go
@@ -0,0 +1,41 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+import (
+ telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry"
+)
+
+// Histogram tracks the value of one health metric of the Agent.
+type Histogram interface {
+ telemetryComponent.Histogram
+}
+
+type histogramNoOp struct{}
+
+func (h histogramNoOp) Observe(_ float64, _ ...string) {}
+func (h histogramNoOp) Delete(_ ...string) {}
+func (h histogramNoOp) WithValues(tagsValue ...string) telemetryComponent.SimpleHistogram { return nil } //nolint:revive // TODO fix revive unused-parameter
+func (h histogramNoOp) WithTags(tags map[string]string) telemetryComponent.SimpleHistogram { //nolint:revive // TODO fix revive unused-parameter
+ return nil
+}
+
+// NewHistogramNoOp creates a dummy Histogram
+func NewHistogramNoOp() Histogram {
+ return histogramNoOp{}
+}
+
+// NewHistogram creates a Histogram with default options for telemetry purpose.
+// Current implementation used: Prometheus Histogram
+func NewHistogram(subsystem, name string, tags []string, help string, buckets []float64) Histogram {
+ return NewHistogramWithOpts(subsystem, name, tags, help, buckets, DefaultOptions)
+}
+
+// NewHistogramWithOpts creates a Histogram with the given options for telemetry purpose.
+// See NewHistogram()
+func NewHistogramWithOpts(subsystem, name string, tags []string, help string, buckets []float64, opts Options) Histogram {
+ return GetCompatComponent().NewHistogramWithOpts(subsystem, name, tags, help, buckets, telemetryComponent.Options(opts))
+}
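A rough sketch of the histogram helpers above; the bucket boundaries, names, and tag key are illustrative. NewHistogramNoOp is useful where a caller needs a Histogram but no backing metric, for example in tests:

// Illustrative latency histogram with millisecond buckets.
var requestDuration = telemetry.NewHistogram(
	"api",
	"request_duration_ms",
	[]string{"endpoint"},
	"Duration of API requests in milliseconds.",
	[]float64{1, 5, 10, 50, 100, 500},
)

func observeRequest(endpoint string, ms float64) {
	requestDuration.Observe(ms, endpoint)
}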
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/options.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/options.go
new file mode 100644
index 0000000000..50aa7600db
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/options.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+import (
+ telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry"
+)
+
+// Options for telemetry metrics.
+// Creating an Options struct without specifying any of its fields should be the
+// equivalent of using the DefaultOptions var.
+type Options telemetryComponent.Options
+
+// DefaultOptions for telemetry metrics which don't need to specify any option.
+var DefaultOptions Options = Options(telemetryComponent.DefaultOptions)
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/simple_counter.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/simple_counter.go
new file mode 100644
index 0000000000..3b4dfc1af9
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/simple_counter.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+import (
+ telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry"
+)
+
+// SimpleCounter tracks how many times something is happening.
+type SimpleCounter interface {
+ telemetryComponent.SimpleCounter
+}
+
+// NewSimpleCounter creates a new SimpleCounter with default options.
+func NewSimpleCounter(subsystem, name, help string) SimpleCounter {
+ return NewSimpleCounterWithOpts(subsystem, name, help, DefaultOptions)
+}
+
+// NewSimpleCounterWithOpts creates a new SimpleCounter.
+func NewSimpleCounterWithOpts(subsystem, name, help string, opts Options) SimpleCounter {
+ return GetCompatComponent().NewSimpleCounterWithOpts(subsystem, name, help, telemetryComponent.Options(opts))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/simple_gauge.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/simple_gauge.go
new file mode 100644
index 0000000000..8195008972
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/simple_gauge.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+import (
+ telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry"
+)
+
+// SimpleGauge tracks the current value of one metric of the Agent.
+type SimpleGauge interface {
+ telemetryComponent.SimpleGauge
+}
+
+// NewSimpleGauge creates a new SimpleGauge with default options.
+func NewSimpleGauge(subsystem, name, help string) SimpleGauge {
+ return NewSimpleGaugeWithOpts(subsystem, name, help, DefaultOptions)
+}
+
+// NewSimpleGaugeWithOpts creates a new SimpleGauge.
+func NewSimpleGaugeWithOpts(subsystem, name, help string, opts Options) SimpleGauge {
+ return GetCompatComponent().NewSimpleGaugeWithOpts(subsystem, name, help, telemetryComponent.Options(opts))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/simple_histogram.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/simple_histogram.go
new file mode 100644
index 0000000000..cb59af42b6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/simple_histogram.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+import (
+ telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry"
+)
+
+// SimpleHistogram tracks the distribution of values for one metric of the Agent.
+type SimpleHistogram interface {
+ telemetryComponent.SimpleHistogram
+}
+
+// NewSimpleHistogram creates a new SimpleHistogram with default options.
+func NewSimpleHistogram(subsystem, name, help string, buckets []float64) SimpleHistogram {
+ return NewSimpleHistogramWithOpts(subsystem, name, help, buckets, DefaultOptions)
+}
+
+// NewSimpleHistogramWithOpts creates a new SimpleHistogram.
+func NewSimpleHistogramWithOpts(subsystem, name, help string, buckets []float64, opts Options) SimpleHistogram {
+ return GetCompatComponent().NewSimpleHistogramWithOpts(subsystem, name, help, buckets, telemetryComponent.Options(opts))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/stat_counter_wrapper.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/stat_counter_wrapper.go
new file mode 100644
index 0000000000..cb0955dad6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/stat_counter_wrapper.go
@@ -0,0 +1,49 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package telemetry provides types and functions for internal telemetry
+package telemetry
+
+import (
+ "go.uber.org/atomic"
+)
+
+// StatCounterWrapper is a convenience type that allows for migrating telemetry to
+// prometheus Counters while continuing to make the underlying values available for reading
+type StatCounterWrapper struct {
+ stat *atomic.Int64
+ counter Counter
+}
+
+// Inc increments the counter with the given tags value.
+func (sgw *StatCounterWrapper) Inc(tags ...string) {
+ sgw.stat.Inc()
+ sgw.counter.Inc(tags...)
+}
+
+// Delete resets the wrapped value to zero and deletes the underlying counter.
+func (sgw *StatCounterWrapper) Delete() {
+ sgw.stat.Store(0)
+ sgw.counter.Delete()
+}
+
+// Add adds the given value to the counter with the given tags value.
+func (sgw *StatCounterWrapper) Add(v int64, tags ...string) {
+ sgw.stat.Add(v)
+ sgw.counter.Add(float64(v), tags...)
+}
+
+// Load atomically loads the wrapped value.
+func (sgw *StatCounterWrapper) Load() int64 {
+ return sgw.stat.Load()
+}
+
+// NewStatCounterWrapper returns a new StatCounterWrapper
+func NewStatCounterWrapper(subsystem string, statName string, tags []string, description string) *StatCounterWrapper {
+ return &StatCounterWrapper{
+ stat: atomic.NewInt64(0),
+ counter: NewCounter(subsystem, statName, tags, description),
+ }
+}
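A short sketch of why the wrapper is useful: the same value is exported as a Prometheus counter while staying readable in-process through Load. The metric name and tag key are illustrative:

var droppedPayloads = telemetry.NewStatCounterWrapper(
	"receiver", "dropped_payloads", []string{"reason"},
	"Number of payloads dropped by the receiver.",
)

func drop(reason string) {
	droppedPayloads.Inc(reason)
}

func droppedSoFar() int64 {
	// Reads the locally accumulated total without going through Prometheus.
	return droppedPayloads.Load()
}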
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/stat_gauge_wrapper.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/stat_gauge_wrapper.go
new file mode 100644
index 0000000000..4fcbc37014
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/stat_gauge_wrapper.go
@@ -0,0 +1,54 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package telemetry
+
+import (
+ "go.uber.org/atomic"
+)
+
+// StatGaugeWrapper is a convenience type that allows for migrating telemetry to
+// prometheus Gauges while continuing to make the underlying values available for reading
+type StatGaugeWrapper struct {
+ stat *atomic.Int64
+ gauge Gauge
+}
+
+// Inc increments the Gauge value.
+func (sgw *StatGaugeWrapper) Inc() {
+ sgw.stat.Inc()
+ sgw.gauge.Inc()
+}
+
+// Dec decrements the Gauge value.
+func (sgw *StatGaugeWrapper) Dec() {
+ sgw.stat.Dec()
+ sgw.gauge.Dec()
+}
+
+// Add adds the value to the Gauge value.
+func (sgw *StatGaugeWrapper) Add(v int64) {
+ sgw.stat.Add(v)
+ sgw.gauge.Add(float64(v))
+}
+
+// Set stores the given value.
+func (sgw *StatGaugeWrapper) Set(v int64) {
+ sgw.stat.Store(v)
+ sgw.gauge.Set(float64(v))
+}
+
+// Load atomically loads the wrapped value.
+func (sgw *StatGaugeWrapper) Load() int64 {
+ return sgw.stat.Load()
+}
+
+// NewStatGaugeWrapper returns a new StatGaugeWrapper
+func NewStatGaugeWrapper(subsystem string, statName string, tags []string, description string) *StatGaugeWrapper {
+ return &StatGaugeWrapper{
+ stat: atomic.NewInt64(0),
+ gauge: NewGauge(subsystem, statName, tags, description),
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/stats_telemetry.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/stats_telemetry.go
new file mode 100644
index 0000000000..f030586d58
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/stats_telemetry.go
@@ -0,0 +1,67 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021-present Datadog, Inc.
+
+package telemetry
+
+import "sync"
+
+// StatsTelemetrySender contains methods needed for sending stats metrics
+type StatsTelemetrySender interface {
+ Count(metric string, value float64, hostname string, tags []string)
+ Gauge(metric string, value float64, hostname string, tags []string)
+ GaugeNoIndex(metric string, value float64, hostname string, tags []string)
+}
+
+// StatsTelemetryProvider handles stats telemetry and passes it on to a sender
+type StatsTelemetryProvider struct {
+ sender StatsTelemetrySender
+ m sync.RWMutex
+}
+
+var (
+ statsProvider = &StatsTelemetryProvider{}
+)
+
+// NewStatsTelemetryProvider creates a new instance of StatsTelemetryProvider
+func NewStatsTelemetryProvider(sender StatsTelemetrySender) *StatsTelemetryProvider {
+ return &StatsTelemetryProvider{sender: sender}
+}
+
+// RegisterStatsSender registers a sender to send the stats metrics
+func RegisterStatsSender(sender StatsTelemetrySender) {
+ statsProvider.m.Lock()
+ defer statsProvider.m.Unlock()
+ statsProvider.sender = sender
+}
+
+// GetStatsTelemetryProvider gets an instance of the current stats telemetry provider
+func GetStatsTelemetryProvider() *StatsTelemetryProvider {
+ return statsProvider
+}
+
+// Count reports a count metric to the sender
+func (s *StatsTelemetryProvider) Count(metric string, value float64, tags []string) {
+ s.send(func(sender StatsTelemetrySender) { sender.Count(metric, value, "", tags) })
+}
+
+// Gauge reports a gauge metric to the sender
+func (s *StatsTelemetryProvider) Gauge(metric string, value float64, tags []string) {
+ s.send(func(sender StatsTelemetrySender) { sender.Gauge(metric, value, "", tags) })
+}
+
+// GaugeNoIndex reports a gauge metric to the sender without indexing it
+func (s *StatsTelemetryProvider) GaugeNoIndex(metric string, value float64, tags []string) {
+ s.send(func(sender StatsTelemetrySender) { sender.GaugeNoIndex(metric, value, "", tags) })
+}
+
+func (s *StatsTelemetryProvider) send(senderFct func(sender StatsTelemetrySender)) {
+ s.m.RLock()
+ defer s.m.RUnlock()
+ if s.sender == nil {
+ return
+ }
+
+ senderFct(s.sender)
+}
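A minimal sketch of the provider's contract: calls are silently dropped until a sender is registered, after which every Count/Gauge/GaugeNoIndex call is forwarded. The logging sender below is purely illustrative and assumes the "log" package plus the telemetry import above:

// logSender is a hypothetical StatsTelemetrySender that just logs metrics.
type logSender struct{}

func (logSender) Count(metric string, value float64, hostname string, tags []string) {
	log.Printf("count %s=%v host=%q tags=%v", metric, value, hostname, tags)
}
func (logSender) Gauge(metric string, value float64, hostname string, tags []string) {
	log.Printf("gauge %s=%v host=%q tags=%v", metric, value, hostname, tags)
}
func (logSender) GaugeNoIndex(metric string, value float64, hostname string, tags []string) {
	log.Printf("gauge(no-index) %s=%v host=%q tags=%v", metric, value, hostname, tags)
}

func emitExample() {
	telemetry.RegisterStatsSender(logSender{}) // before this call, provider methods are no-ops
	telemetry.GetStatsTelemetryProvider().Gauge("datadog.example.metric", 1, []string{"env:test"})
}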
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/telemetry.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/telemetry.go
new file mode 100644
index 0000000000..09a13f8370
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/telemetry.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build !serverless
+
+package telemetry
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
+)
+
+// GetCompatComponent returns a component wrapping telemetry global variables
+func GetCompatComponent() telemetry.Component {
+ return telemetryimpl.GetCompatComponent()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/telemetry_noop.go b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/telemetry_noop.go
new file mode 100644
index 0000000000..b7d95a998e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/telemetry/telemetry_noop.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build serverless
+
+package telemetry
+
+import (
+ "github.com/DataDog/datadog-agent/comp/core/telemetry"
+ "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl"
+)
+
+// GetCompatComponent returns a component wrapping telemetry global variables
+func GetCompatComponent() telemetry.Component {
+ return noopsimpl.GetCompatComponent()
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/agent.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/agent.go
index b9e3ea8364..bc9d36c103 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/agent.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/agent.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
+// Package agent implements the trace-agent.
package agent
import (
@@ -47,6 +47,9 @@ const (
// manualSampling is the value for _dd.p.dm when user sets sampling priority directly in code.
manualSampling = "-4"
+ // probabilitySampling is the value for _dd.p.dm when the agent is configured to use the ProbabilitySampler.
+ probabilitySampling = "-9"
+
// tagDecisionMaker specifies the sampling decision maker
tagDecisionMaker = "_dd.p.dm"
)
@@ -63,6 +66,7 @@ type Agent struct {
ErrorsSampler *sampler.ErrorsSampler
RareSampler *sampler.RareSampler
NoPrioritySampler *sampler.NoPrioritySampler
+ ProbabilisticSampler *sampler.ProbabilisticSampler
EventProcessor *event.Processor
TraceWriter *writer.TraceWriter
StatsWriter *writer.StatsWriter
@@ -117,6 +121,7 @@ func NewAgent(ctx context.Context, conf *config.AgentConfig, telemetryCollector
ErrorsSampler: sampler.NewErrorsSampler(conf, statsd),
RareSampler: sampler.NewRareSampler(conf, statsd),
NoPrioritySampler: sampler.NewNoPrioritySampler(conf, statsd),
+ ProbabilisticSampler: sampler.NewProbabilisticSampler(conf, statsd),
EventProcessor: newEventProcessor(conf, statsd),
StatsWriter: writer.NewStatsWriter(conf, statsChan, telemetryCollector, statsd, timing),
obfuscator: obfuscate.NewObfuscator(oconf),
@@ -146,6 +151,7 @@ func (a *Agent) Run() {
a.PrioritySampler,
a.ErrorsSampler,
a.NoPrioritySampler,
+ a.ProbabilisticSampler,
a.EventProcessor,
a.OTLPReceiver,
a.RemoteConfigHandler,
@@ -218,6 +224,7 @@ func (a *Agent) loop() {
a.PrioritySampler,
a.ErrorsSampler,
a.NoPrioritySampler,
+ a.ProbabilisticSampler,
a.RareSampler,
a.EventProcessor,
a.obfuscator,
@@ -292,7 +299,7 @@ func (a *Agent) Process(p *api.Payload) {
// Root span is used to carry some trace-level metadata, such as sampling rate and priority.
root := traceutil.GetRoot(chunk.Spans)
- setChunkAttributesFromRoot(chunk, root)
+ setChunkAttributes(chunk, root)
if !a.Blacklister.Allows(root) {
log.Debugf("Trace rejected by ignore resources rules. root: %v", root)
ts.TracesFiltered.Inc()
@@ -422,14 +429,12 @@ func processedTrace(p *api.Payload, chunk *pb.TraceChunk, root *pb.Span, contain
}
// newChunksArray creates a new array which will point only to sampled chunks.
-
// The underlying array behind TracePayload.Chunks points to unsampled chunks
// preventing them from being collected by the GC.
func newChunksArray(chunks []*pb.TraceChunk) []*pb.TraceChunk {
- //nolint:revive // TODO(APM) Fix revive linter
- new := make([]*pb.TraceChunk, len(chunks))
- copy(new, chunks)
- return new
+ newChunks := make([]*pb.TraceChunk, len(chunks))
+ copy(newChunks, chunks)
+ return newChunks
}
var _ api.StatsProcessor = (*Agent)(nil)
@@ -514,17 +519,6 @@ func (a *Agent) ProcessStats(in *pb.ClientStatsPayload, lang, tracerVersion stri
a.ClientStatsAggregator.In <- a.processStats(in, lang, tracerVersion)
}
-func isManualUserDrop(priority sampler.SamplingPriority, pt *traceutil.ProcessedTrace) bool {
- if priority != sampler.PriorityUserDrop {
- return false
- }
- dm, hasDm := pt.Root.Meta[tagDecisionMaker]
- if !hasDm {
- return false
- }
- return dm == manualSampling
-}
-
// sample performs all sampling on the processedTrace modifying it as needed and returning if the trace should be kept and the number of events in the trace
func (a *Agent) sample(now time.Time, ts *info.TagStats, pt *traceutil.ProcessedTrace) (keep bool, numEvents int) {
// We have a `keep` that is different from pt's `DroppedTrace` field as `DroppedTrace` will be sent to intake.
@@ -551,10 +545,66 @@ func (a *Agent) sample(now time.Time, ts *info.TagStats, pt *traceutil.Processed
return keep, len(events)
}
+// isManualUserDrop returns true if and only if the ProcessedTrace is marked as Priority User Drop
+// AND has a sampling decision maker of "Manual Sampling" (-4).
+//
+// Note: this does not catch every user-dropped trace, since most tracers do not set
+// the decision maker field in user-drop scenarios.
+func isManualUserDrop(pt *traceutil.ProcessedTrace) bool {
+ priority, _ := sampler.GetSamplingPriority(pt.TraceChunk)
+ // Default priority is non-drop, so it's safe to ignore if the priority wasn't found
+ if priority != sampler.PriorityUserDrop {
+ return false
+ }
+ dm, hasDm := pt.TraceChunk.Tags[tagDecisionMaker]
+ if !hasDm {
+ return false
+ }
+ return dm == manualSampling
+}
+
// traceSampling reports whether the chunk should be kept as a trace, setting "DroppedTrace" on the chunk
func (a *Agent) traceSampling(now time.Time, ts *info.TagStats, pt *traceutil.ProcessedTrace) (keep bool, checkAnalyticsEvents bool) {
- priority, hasPriority := sampler.GetSamplingPriority(pt.TraceChunk)
+ sampled, check := a.runSamplers(now, ts, *pt)
+ pt.TraceChunk.DroppedTrace = !sampled
+ return sampled, check
+}
+// getAnalyzedEvents returns any sampled analytics events in the ProcessedTrace
+func (a *Agent) getAnalyzedEvents(pt *traceutil.ProcessedTrace, ts *info.TagStats) []*pb.Span {
+ numEvents, numExtracted, events := a.EventProcessor.Process(pt)
+ ts.EventsExtracted.Add(numExtracted)
+ ts.EventsSampled.Add(numEvents)
+ return events
+}
+
+// runSamplers runs the agent's configured samplers on pt and reports whether the trace should be
+// kept, along with whether analytics events should still be considered.
+//
+// The rare sampler is run first, catching all rare traces early. If the probabilistic sampler is
+// enabled, it is run on the trace, followed by the error sampler. Otherwise, if the trace has a
+// priority set, the sampling priority is used with the Priority Sampler. When there is no priority
+// set, the NoPrioritySampler is run. Finally, if the trace has not been sampled by the other
+// samplers, the error sampler is run.
+func (a *Agent) runSamplers(now time.Time, ts *info.TagStats, pt traceutil.ProcessedTrace) (keep bool, checkAnalyticsEvents bool) {
+ // run this early to make sure the signature gets counted by the RareSampler.
+ rare := a.RareSampler.Sample(now, pt.TraceChunk, pt.TracerEnv)
+
+ if a.conf.ProbabilisticSamplerEnabled {
+ if rare {
+ return true, true
+ }
+ if a.ProbabilisticSampler.Sample(pt.Root) {
+ pt.TraceChunk.Tags[tagDecisionMaker] = probabilitySampling
+ return true, true
+ }
+ if traceContainsError(pt.TraceChunk.Spans) {
+ return a.ErrorsSampler.Sample(now, pt.TraceChunk.Spans, pt.Root, pt.TracerEnv), true
+ }
+ return false, true
+ }
+
+ priority, hasPriority := sampler.GetSamplingPriority(pt.TraceChunk)
if hasPriority {
ts.TracesPerSamplingPriority.CountSamplingPriority(priority)
} else {
@@ -564,7 +614,7 @@ func (a *Agent) traceSampling(now time.Time, ts *info.TagStats, pt *traceutil.Pr
// We skip analytics events when a trace is marked as manual drop (aka priority -1)
// Note that we DON'T skip single span sampling. We only do this for historical
// reasons and analytics events are deprecated so hopefully this can all go away someday.
- if isManualUserDrop(priority, pt) {
+ if isManualUserDrop(&pt) {
return false, false
}
} else { // This path to be deleted once manualUserDrop detection is available on all tracers for P < 1.
@@ -572,51 +622,24 @@ func (a *Agent) traceSampling(now time.Time, ts *info.TagStats, pt *traceutil.Pr
return false, false
}
}
- sampled := a.runSamplers(now, *pt, hasPriority)
- pt.TraceChunk.DroppedTrace = !sampled
- return sampled, true
-}
-
-// getAnalyzedEvents returns any sampled analytics events in the ProcessedTrace
-func (a *Agent) getAnalyzedEvents(pt *traceutil.ProcessedTrace, ts *info.TagStats) []*pb.Span {
- numEvents, numExtracted, events := a.EventProcessor.Process(pt)
- ts.EventsExtracted.Add(numExtracted)
- ts.EventsSampled.Add(numEvents)
- return events
-}
+ if rare {
+ return true, true
+ }
-// runSamplers runs all the agent's samplers on pt and returns the sampling decision
-// along with the sampling rate.
-func (a *Agent) runSamplers(now time.Time, pt traceutil.ProcessedTrace, hasPriority bool) bool {
if hasPriority {
- return a.samplePriorityTrace(now, pt)
+ if a.PrioritySampler.Sample(now, pt.TraceChunk, pt.Root, pt.TracerEnv, pt.ClientDroppedP0sWeight) {
+ return true, true
+ }
+ } else if a.NoPrioritySampler.Sample(now, pt.TraceChunk.Spans, pt.Root, pt.TracerEnv) {
+ return true, true
}
- return a.sampleNoPriorityTrace(now, pt)
-}
-// samplePriorityTrace samples traces with priority set on them. PrioritySampler and
-// ErrorSampler are run in parallel. The RareSampler catches traces with rare top-level
-// or measured spans that are not caught by PrioritySampler and ErrorSampler.
-func (a *Agent) samplePriorityTrace(now time.Time, pt traceutil.ProcessedTrace) bool {
- // run this early to make sure the signature gets counted by the RareSampler.
- rare := a.RareSampler.Sample(now, pt.TraceChunk, pt.TracerEnv)
- if a.PrioritySampler.Sample(now, pt.TraceChunk, pt.Root, pt.TracerEnv, pt.ClientDroppedP0sWeight) {
- return true
- }
if traceContainsError(pt.TraceChunk.Spans) {
- return a.ErrorsSampler.Sample(now, pt.TraceChunk.Spans, pt.Root, pt.TracerEnv)
+ return a.ErrorsSampler.Sample(now, pt.TraceChunk.Spans, pt.Root, pt.TracerEnv), true
}
- return rare
-}
-// sampleNoPriorityTrace samples traces with no priority set on them. The traces
-// get sampled by either the score sampler or the error sampler if they have an error.
-func (a *Agent) sampleNoPriorityTrace(now time.Time, pt traceutil.ProcessedTrace) bool {
- if traceContainsError(pt.TraceChunk.Spans) {
- return a.ErrorsSampler.Sample(now, pt.TraceChunk.Spans, pt.Root, pt.TracerEnv)
- }
- return a.NoPrioritySampler.Sample(now, pt.TraceChunk.Spans, pt.Root, pt.TracerEnv)
+ return false, true
}
func traceContainsError(trace pb.Trace) bool {
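For reference, the decision-maker values used in this file map to samplers as follows; the helper is purely illustrative and not part of the agent:

// decisionMakerName interprets the _dd.p.dm values referenced above.
func decisionMakerName(dm string) string {
	switch dm {
	case "-4": // manualSampling
		return "manual sampling (priority set by the user in code)"
	case "-9": // probabilitySampling
		return "probabilistic sampling (agent-side ProbabilisticSampler)"
	default:
		return "other decision maker"
	}
}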
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/normalizer.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/normalizer.go
index 13266b69bd..25bf18fca8 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/normalizer.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/agent/normalizer.go
@@ -172,10 +172,11 @@ func (a *Agent) normalize(ts *info.TagStats, s *pb.Span) error {
return nil
}
-// setChunkAttributesFromRoot takes a trace chunk and from the root span
+// setChunkAttributes takes a trace chunk and, using its root span and the other spans in the chunk,
// * populates Origin field if it wasn't populated
// * populates Priority field if it wasn't populated
-func setChunkAttributesFromRoot(chunk *pb.TraceChunk, root *pb.Span) {
+// * promotes the decision maker found in any internal span to a chunk tag
+func setChunkAttributes(chunk *pb.TraceChunk, root *pb.Span) {
// check if priority is already populated
if chunk.Priority == int32(sampler.PriorityNone) {
// Older tracers set sampling priority in the root span.
@@ -194,6 +195,18 @@ func setChunkAttributesFromRoot(chunk *pb.TraceChunk, root *pb.Span) {
// Older tracers set origin in the root span.
chunk.Origin = root.Meta[tagOrigin]
}
+
+ if _, ok := chunk.Tags[tagDecisionMaker]; !ok {
+ for _, span := range chunk.Spans {
+ // First span wins
+ if dm, ok := span.Meta[tagDecisionMaker]; ok {
+ chunk.Tags[tagDecisionMaker] = dm
+ break
+ }
+ // There are downstream systems that rely on this tag being on the span
+ // delete(span.Meta, tagDecisionMaker)
+ }
+ }
}
// normalizeTrace takes a trace and
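The promotion above can be pictured with a minimal, self-contained sketch (pb types replaced by plain maps): the first span carrying _dd.p.dm wins, and the tag stays on the span because downstream systems rely on it:

func promoteDecisionMaker(chunkTags map[string]string, spanMetas []map[string]string) {
	if _, ok := chunkTags["_dd.p.dm"]; ok {
		return // chunk already carries a decision maker
	}
	for _, meta := range spanMetas {
		if dm, ok := meta["_dd.p.dm"]; ok {
			chunkTags["_dd.p.dm"] = dm // first span wins; the span keeps its copy
			break
		}
	}
}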
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/api.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/api.go
index 0230d866ff..dcb27d8f79 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/api.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/api.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
+// Package api implements the HTTP server that receives payloads from clients.
package api
import (
@@ -350,11 +350,11 @@ const (
// TagStats returns the stats and tags coinciding with the information found in header.
// For more information, check the "Datadog-Meta-*" HTTP headers defined in this file.
-func (r *HTTPReceiver) TagStats(v Version, header http.Header) *info.TagStats {
- return r.tagStats(v, header)
+func (r *HTTPReceiver) TagStats(v Version, header http.Header, service string) *info.TagStats {
+ return r.tagStats(v, header, service)
}
-func (r *HTTPReceiver) tagStats(v Version, httpHeader http.Header) *info.TagStats {
+func (r *HTTPReceiver) tagStats(v Version, httpHeader http.Header, service string) *info.TagStats {
return r.Stats.GetTagStats(info.Tags{
Lang: httpHeader.Get(header.Lang),
LangVersion: httpHeader.Get(header.LangVersion),
@@ -362,6 +362,7 @@ func (r *HTTPReceiver) tagStats(v Version, httpHeader http.Header) *info.TagStat
LangVendor: httpHeader.Get(header.LangInterpreterVendor),
TracerVersion: httpHeader.Get(header.TracerVersion),
EndpointVersion: string(v),
+ Service: service,
})
}
@@ -369,7 +370,7 @@ func (r *HTTPReceiver) tagStats(v Version, httpHeader http.Header) *info.TagStat
// - tp is the decoded payload
// - ranHook reports whether the decoder was able to run the pb.MetaHook
// - err is the first error encountered
-func decodeTracerPayload(v Version, req *http.Request, ts *info.TagStats, cIDProvider IDProvider) (tp *pb.TracerPayload, ranHook bool, err error) {
+func decodeTracerPayload(v Version, req *http.Request, cIDProvider IDProvider, lang, langVersion, tracerVersion string) (tp *pb.TracerPayload, ranHook bool, err error) {
switch v {
case v01:
var spans []*pb.Span
@@ -377,11 +378,11 @@ func decodeTracerPayload(v Version, req *http.Request, ts *info.TagStats, cIDPro
return nil, false, err
}
return &pb.TracerPayload{
- LanguageName: ts.Lang,
- LanguageVersion: ts.LangVersion,
+ LanguageName: lang,
+ LanguageVersion: langVersion,
ContainerID: cIDProvider.GetContainerID(req.Context(), req.Header),
Chunks: traceChunksFromSpans(spans),
- TracerVersion: ts.TracerVersion,
+ TracerVersion: tracerVersion,
}, false, nil
case v05:
buf := getBuffer()
@@ -392,11 +393,11 @@ func decodeTracerPayload(v Version, req *http.Request, ts *info.TagStats, cIDPro
var traces pb.Traces
err = traces.UnmarshalMsgDictionary(buf.Bytes())
return &pb.TracerPayload{
- LanguageName: ts.Lang,
- LanguageVersion: ts.LangVersion,
+ LanguageName: lang,
+ LanguageVersion: langVersion,
ContainerID: cIDProvider.GetContainerID(req.Context(), req.Header),
Chunks: traceChunksFromTraces(traces),
- TracerVersion: ts.TracerVersion,
+ TracerVersion: tracerVersion,
}, true, err
case V07:
buf := getBuffer()
@@ -413,11 +414,11 @@ func decodeTracerPayload(v Version, req *http.Request, ts *info.TagStats, cIDPro
return nil, false, err
}
return &pb.TracerPayload{
- LanguageName: ts.Lang,
- LanguageVersion: ts.LangVersion,
+ LanguageName: lang,
+ LanguageVersion: langVersion,
ContainerID: cIDProvider.GetContainerID(req.Context(), req.Header),
Chunks: traceChunksFromTraces(traces),
- TracerVersion: ts.TracerVersion,
+ TracerVersion: tracerVersion,
}, ranHook, nil
}
}
@@ -446,7 +447,6 @@ type StatsProcessor interface {
func (r *HTTPReceiver) handleStats(w http.ResponseWriter, req *http.Request) {
defer r.timing.Since("datadog.trace_agent.receiver.stats_process_ms", time.Now())
- ts := r.tagStats(V07, req.Header)
rd := apiutil.NewLimitedReader(req.Body, r.conf.MaxRequestBytes)
req.Header.Set("Accept", "application/msgpack")
in := &pb.ClientStatsPayload{}
@@ -456,6 +456,14 @@ func (r *HTTPReceiver) handleStats(w http.ResponseWriter, req *http.Request) {
return
}
+ firstService := func(cs *pb.ClientStatsPayload) string {
+ if cs == nil || len(cs.Stats) == 0 || len(cs.Stats[0].Stats) == 0 {
+ return ""
+ }
+ return cs.Stats[0].Stats[0].Service
+ }
+
+ ts := r.tagStats(V06, req.Header, firstService(in))
_ = r.statsd.Count("datadog.trace_agent.receiver.stats_payload", 1, ts.AsTags(), 1)
_ = r.statsd.Count("datadog.trace_agent.receiver.stats_bytes", rd.Count, ts.AsTags(), 1)
_ = r.statsd.Count("datadog.trace_agent.receiver.stats_buckets", int64(len(in.Stats)), ts.AsTags(), 1)
@@ -465,7 +473,6 @@ func (r *HTTPReceiver) handleStats(w http.ResponseWriter, req *http.Request) {
// handleTraces knows how to handle a bunch of traces
func (r *HTTPReceiver) handleTraces(v Version, w http.ResponseWriter, req *http.Request) {
- ts := r.tagStats(v, req.Header)
tracen, err := traceCount(req)
if err == errInvalidHeaderTraceCountValue {
log.Errorf("Failed to count traces: %s", err)
@@ -487,7 +494,7 @@ func (r *HTTPReceiver) handleTraces(v Version, w http.ResponseWriter, req *http.
w.WriteHeader(r.rateLimiterResponse)
}
r.replyOK(req, v, w)
- ts.PayloadRefused.Inc()
+ r.tagStats(v, req.Header, "").PayloadRefused.Inc()
return
}
defer func() {
@@ -496,8 +503,16 @@ func (r *HTTPReceiver) handleTraces(v Version, w http.ResponseWriter, req *http.
<-r.recvsem
}()
+ firstService := func(tp *pb.TracerPayload) string {
+ if tp == nil || len(tp.Chunks) == 0 || len(tp.Chunks[0].Spans) == 0 {
+ return ""
+ }
+ return tp.Chunks[0].Spans[0].Service
+ }
+
start := time.Now()
- tp, ranHook, err := decodeTracerPayload(v, req, ts, r.containerIDProvider)
+ tp, ranHook, err := decodeTracerPayload(v, req, r.containerIDProvider, req.Header.Get(header.Lang), req.Header.Get(header.LangVersion), req.Header.Get(header.TracerVersion))
+ ts := r.tagStats(v, req.Header, firstService(tp))
defer func(err error) {
tags := append(ts.AsTags(), fmt.Sprintf("success:%v", err == nil))
_ = r.statsd.Histogram("datadog.trace_agent.receiver.serve_traces_ms", float64(time.Since(start))/float64(time.Millisecond), tags, 1)
@@ -593,9 +608,7 @@ func droppedTracesFromHeader(h http.Header, ts *info.TagStats) int64 {
}
// handleServices handle a request with a list of several services
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (r *HTTPReceiver) handleServices(v Version, w http.ResponseWriter, req *http.Request) {
+func (r *HTTPReceiver) handleServices(_ Version, w http.ResponseWriter, _ *http.Request) {
httpOK(w)
// Do nothing, services are no longer being sent to Datadog as of July 2019
@@ -609,7 +622,7 @@ func (r *HTTPReceiver) loop() {
var lastLog time.Time
accStats := info.NewReceiverStats()
- t := time.NewTicker(10 * time.Second)
+ t := time.NewTicker(5 * time.Second)
defer t.Stop()
tw := time.NewTicker(r.conf.WatchdogInterval)
defer tw.Stop()
@@ -745,6 +758,7 @@ func traceChunksFromSpans(spans []*pb.Span) []*pb.TraceChunk {
traceChunks = append(traceChunks, &pb.TraceChunk{
Priority: int32(sampler.PriorityNone),
Spans: t,
+ Tags: make(map[string]string),
})
}
return traceChunks
@@ -756,6 +770,7 @@ func traceChunksFromTraces(traces pb.Traces) []*pb.TraceChunk {
traceChunks = append(traceChunks, &pb.TraceChunk{
Priority: int32(sampler.PriorityNone),
Spans: trace,
+ Tags: make(map[string]string),
})
}
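Initializing Tags on every decoded chunk matters because later stages (setChunkAttributes and the probabilistic-sampler path in agent.go) assign into chunk.Tags, and writing to a nil map panics. A minimal illustration of the failure mode:

func tagsMustBeInitialized() {
	var tags map[string]string
	// tags["_dd.p.dm"] = "-9" // would panic: assignment to entry in nil map
	tags = make(map[string]string) // what traceChunksFromSpans/traceChunksFromTraces now do
	tags["_dd.p.dm"] = "-9"        // safe
	_ = tags
}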
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/apiutil/limited_reader.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/apiutil/limited_reader.go
index 66df228ccc..296c483be9 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/apiutil/limited_reader.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/apiutil/limited_reader.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
+// Package apiutil provides utility functions for the API.
package apiutil
import (
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/container.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/container.go
index e7e81dc805..508277c5ab 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/container.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/container.go
@@ -16,9 +16,7 @@ import (
)
// connContext is unimplemented for non-linux builds.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func connContext(ctx context.Context, c net.Conn) context.Context {
+func connContext(ctx context.Context, _ net.Conn) context.Context {
return ctx
}
@@ -34,7 +32,7 @@ func NewIDProvider(_ string) IDProvider {
return &idProvider{}
}
-//nolint:revive // TODO(APM) Fix revive linter
-func (_ *idProvider) GetContainerID(_ context.Context, h http.Header) string {
+// GetContainerID returns the container ID from the http header.
+func (*idProvider) GetContainerID(_ context.Context, h http.Header) string {
return h.Get(header.ContainerID)
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/container_linux.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/container_linux.go
index 6eeab347e6..cd933730fc 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/container_linux.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/container_linux.go
@@ -46,17 +46,27 @@ func connContext(ctx context.Context, c net.Conn) context.Context {
if !ok {
return ctx
}
- file, err := s.File()
+ raw, err := s.SyscallConn()
if err != nil {
- log.Debugf("Failed to obtain unix socket file: %v", err)
+ log.Debugf("Failed to read credentials from unix socket: %v", err)
return ctx
}
- fd := int(file.Fd())
- ucred, err := syscall.GetsockoptUcred(fd, syscall.SOL_SOCKET, syscall.SO_PEERCRED)
+ var (
+ ucred *syscall.Ucred
+ cerr error
+ )
+ err = raw.Control(func(fd uintptr) {
+ ucred, cerr = syscall.GetsockoptUcred(int(fd), syscall.SOL_SOCKET, syscall.SO_PEERCRED)
+ })
if err != nil {
- log.Debugf("Failed to read credentials from unix socket: %v", err)
+ log.Debugf("Failed to control raw unix socket: %v", err)
return ctx
}
+ if cerr != nil {
+ log.Debugf("Failed to read credentials from unix socket: %v", cerr)
+ return ctx
+ }
+
return context.WithValue(ctx, ucredKey{}, ucred)
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/dogstatsd.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/dogstatsd.go
index 15f3963b0c..7207fcc8e6 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/dogstatsd.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/dogstatsd.go
@@ -16,8 +16,9 @@ import (
)
// dogstatsdProxyHandler returns a new HTTP handler which will proxy requests to
-// the DogStatsD endpoint in the Core Agent over UDP or UDS (defaulting to UDS
-// if StatsdSocket is set in the *AgentConfig).
+// the DogStatsD endpoint in the Core Agent over UDP. Communication between the
+// proxy and the agent does not support UDS (see #13628), and so does not guarantee delivery of
+// all statsd payloads.
func (r *HTTPReceiver) dogstatsdProxyHandler() http.Handler {
if !r.conf.StatsdEnabled {
log.Info("DogstatsD disabled in the Agent configuration. The DogstatsD proxy endpoint will be non-functional.")
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/internal/header/headers.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/internal/header/headers.go
index a0c7d5c7c7..1306dc4b52 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/internal/header/headers.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/internal/header/headers.go
@@ -3,9 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-// package header defines HTTP headers known convention used by the Trace Agent and Datadog's APM intake.
-//
-//nolint:revive // TODO(APM) Fix revive linter
+// Package header defines the HTTP header conventions used by the Trace Agent and Datadog's APM intake.
package header
const (
@@ -63,8 +61,7 @@ const (
// If both agent and client have the same version, the agent won't return rates in API response.
RatesPayloadVersion = "Datadog-Rates-Payload-Version"
- //nolint:revive // TODO(APM) Fix revive linter
- // SendTrueHTTPStatus can be sent by the client to signal to the agent that
+ // SendRealHTTPStatus can be sent by the client to signal to the agent that
// it wants to receive the "real" status in the response. By default, the agent
// will send a 200 OK response for every payload, even those dropped due to
// intake limits.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/listener.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/listener.go
index ebb2ce768f..01e3d71077 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/listener.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/listener.go
@@ -93,7 +93,7 @@ func (c *onCloseConn) Close() error {
return err
}
-//nolint:revive // TODO(APM) Fix revive linter
+// OnCloseConn returns a net.Conn that calls onclose when closed.
func OnCloseConn(c net.Conn, onclose func()) net.Conn {
return &onCloseConn{c, onclose, sync.Once{}}
}
@@ -109,10 +109,11 @@ func (ln *measuredListener) Accept() (net.Conn, error) {
} else {
ln.errored.Inc()
}
- } else {
- ln.accepted.Inc()
- log.Tracef("Accepted connection named %q.", ln.name)
+ <-ln.sem
+ return nil, err
}
+ ln.accepted.Inc()
+ log.Tracef("Accepted connection named %q.", ln.name)
conn = OnCloseConn(conn, func() {
<-ln.sem
})
@@ -231,11 +232,11 @@ func (sl *rateLimitedListener) Accept() (net.Conn, error) {
if ne.Temporary() {
// deadline expired; continue
continue
- } else { //nolint:revive // TODO(APM) Fix revive linter
- // don't count temporary errors; they usually signify expired deadlines
- // see (golang/go/src/internal/poll/fd.go).TimeoutError
- sl.timedout.Inc()
}
+ // don't count temporary errors; they usually signify expired deadlines
+ // see (golang/go/src/internal/poll/fd.go).TimeoutError
+ sl.timedout.Inc()
+
} else {
sl.errored.Inc()
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/otlp.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/otlp.go
index 53a5007920..bac5fb3d68 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/otlp.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/otlp.go
@@ -35,7 +35,6 @@ import (
"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
semconv117 "go.opentelemetry.io/collector/semconv/v1.17.0"
semconv "go.opentelemetry.io/collector/semconv/v1.6.1"
- "go.opentelemetry.io/otel/attribute"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
@@ -44,10 +43,6 @@ import (
// computed for the resource spans.
const keyStatsComputed = "_dd.stats_computed"
-var (
- signalTypeSet = attribute.NewSet(attribute.String("signal", "traces"))
-)
-
var _ (ptraceotlp.GRPCServer) = (*OTLPReceiver)(nil)
// OTLPReceiver implements an OpenTelemetry Collector receiver which accepts incoming
@@ -65,6 +60,11 @@ type OTLPReceiver struct {
// NewOTLPReceiver returns a new OTLPReceiver which sends any incoming traces down the out channel.
func NewOTLPReceiver(out chan<- *Payload, cfg *config.AgentConfig, statsd statsd.ClientInterface, timing timing.Reporter) *OTLPReceiver {
+ computeTopLevelBySpanKindVal := 0.0
+ if cfg.HasFeature("enable_otlp_compute_top_level_by_span_kind") {
+ computeTopLevelBySpanKindVal = 1.0
+ }
+ _ = statsd.Gauge("datadog.trace_agent.otlp.compute_top_level_by_span_kind", computeTopLevelBySpanKindVal, nil, 1)
return &OTLPReceiver{out: out, conf: cfg, cidProvider: NewIDProvider(cfg.ContainerProcRoot), statsd: statsd, timing: timing}
}
@@ -185,7 +185,7 @@ func (o *OTLPReceiver) sample(tid uint64) sampler.SamplingPriority {
func (o *OTLPReceiver) ReceiveResourceSpans(ctx context.Context, rspans ptrace.ResourceSpans, httpHeader http.Header) source.Source {
// each rspans is coming from a different resource and should be considered
// a separate payload; typically there is only one item in this slice
- src, srcok := o.conf.OTLPReceiver.AttributesTranslator.ResourceToSource(ctx, rspans.Resource(), signalTypeSet)
+ src, srcok := o.conf.OTLPReceiver.AttributesTranslator.ResourceToSource(ctx, rspans.Resource(), traceutil.SignalTypeSet)
hostFromMap := func(m map[string]string, key string) {
// hostFromMap sets the hostname to m[key] if it is set.
if v, ok := m[key]; ok {
@@ -264,7 +264,7 @@ func (o *OTLPReceiver) ReceiveResourceSpans(ctx context.Context, rspans ptrace.R
p := Payload{
Source: tagstats,
ClientComputedStats: rattr[keyStatsComputed] != "" || httpHeader.Get(header.ComputedStats) != "",
- ClientComputedTopLevel: httpHeader.Get(header.ComputedTopLevel) != "",
+ ClientComputedTopLevel: o.conf.HasFeature("enable_otlp_compute_top_level_by_span_kind") || httpHeader.Get(header.ComputedTopLevel) != "",
}
if env == "" {
env = o.conf.DefaultEnv
@@ -520,8 +520,14 @@ func (o *OTLPReceiver) convertSpan(rattr map[string]string, lib pcommon.Instrume
for k, v := range rattr {
setMetaOTLP(span, k, v)
}
+
+ spanKind := in.Kind()
+ if o.conf.HasFeature("enable_otlp_compute_top_level_by_span_kind") {
+ computeTopLevelAndMeasured(span, spanKind)
+ }
+
setMetaOTLP(span, "otel.trace_id", hex.EncodeToString(traceID[:]))
- setMetaOTLP(span, "span.kind", spanKindName(in.Kind()))
+ setMetaOTLP(span, "span.kind", spanKindName(spanKind))
if _, ok := span.Meta["version"]; !ok {
if ver := rattr[string(semconv.AttributeServiceVersion)]; ver != "" {
setMetaOTLP(span, "version", ver)
@@ -533,6 +539,10 @@ func (o *OTLPReceiver) convertSpan(rattr map[string]string, lib pcommon.Instrume
if in.Links().Len() > 0 {
setMetaOTLP(span, "_dd.span_links", marshalLinks(in.Links()))
}
+
+ var gotMethodFromNewConv bool
+ var gotStatusCodeFromNewConv bool
+
in.Attributes().Range(func(k string, v pcommon.Value) bool {
switch v.Type() {
case pcommon.ValueTypeDouble:
@@ -540,8 +550,37 @@ func (o *OTLPReceiver) convertSpan(rattr map[string]string, lib pcommon.Instrume
case pcommon.ValueTypeInt:
setMetricOTLP(span, k, float64(v.Int()))
default:
- setMetaOTLP(span, k, v.AsString())
+ // Exclude Datadog APM conventions.
+ // These are handled below explicitly.
+ if k != "http.method" && k != "http.status_code" {
+ setMetaOTLP(span, k, v.AsString())
+ }
+ }
+
+ // `http.method` was renamed to `http.request.method` in the HTTP stabilization from v1.23.
+ // See https://opentelemetry.io/docs/specs/semconv/http/migration-guide/#summary-of-changes
+ // `http.method` is also the Datadog APM convention for the HTTP method.
+ // We check both conventions and use the new one if it is present.
+ // See https://datadoghq.atlassian.net/wiki/spaces/APM/pages/2357395856/Span+attributes#[inlineExtension]HTTP
+ if k == "http.request.method" {
+ gotMethodFromNewConv = true
+ setMetaOTLP(span, "http.method", v.AsString())
+ } else if k == "http.method" && !gotMethodFromNewConv {
+ setMetaOTLP(span, "http.method", v.AsString())
+ }
+
+ // `http.status_code` was renamed to `http.response.status_code` in the HTTP stabilization from v1.23.
+ // See https://opentelemetry.io/docs/specs/semconv/http/migration-guide/#summary-of-changes
+ // `http.status_code` is also the Datadog APM convention for the HTTP status code.
+ // We check both conventions and use the new one if it is present.
+ // See https://datadoghq.atlassian.net/wiki/spaces/APM/pages/2357395856/Span+attributes#[inlineExtension]HTTP
+ if k == "http.response.status_code" {
+ gotStatusCodeFromNewConv = true
+ setMetaOTLP(span, "http.status_code", v.AsString())
+ } else if k == "http.status_code" && !gotStatusCodeFromNewConv {
+ setMetaOTLP(span, "http.status_code", v.AsString())
}
+
return true
})
if _, ok := span.Meta["env"]; !ok {
@@ -597,7 +636,9 @@ func (o *OTLPReceiver) convertSpan(rattr map[string]string, lib pcommon.Instrume
// resourceFromTags attempts to deduce a more accurate span resource from the given list of tags meta.
// If this is not possible, it returns an empty string.
func resourceFromTags(meta map[string]string) string {
- if m := meta[string(semconv.AttributeHTTPMethod)]; m != "" {
+ // `http.method` was renamed to `http.request.method` in the HTTP stabilization from v1.23.
+ // See https://opentelemetry.io/docs/specs/semconv/http/migration-guide/#summary-of-changes
+ if _, m := getFirstFromMap(meta, "http.request.method", "http.method"); m != "" {
// use the HTTP method + route (if available)
if _, route := getFirstFromMap(meta, semconv.AttributeHTTPRoute, "grpc.path"); route != "" {
return m + " " + route
@@ -659,8 +700,12 @@ func status2Error(status ptrace.Status, events ptrace.SpanEventSlice, span *pb.S
if status.Message() != "" {
// use the status message
span.Meta["error.msg"] = status.Message()
- } else if httpcode, ok := span.Meta["http.status_code"]; ok {
- // we have status code that we can use as details
+ } else if _, httpcode := getFirstFromMap(span.Meta, "http.response.status_code", "http.status_code"); httpcode != "" {
+ // `http.status_code` was renamed to `http.response.status_code` in the HTTP stabilization from v1.23.
+ // See https://opentelemetry.io/docs/specs/semconv/http/migration-guide/#summary-of-changes
+
+ // http.status_text was removed in spec v0.7.0 (https://github.com/open-telemetry/opentelemetry-specification/pull/972)
+ // TODO (OTEL-1791) Remove this and use a map from status code to status text.
if httptext, ok := span.Meta["http.status_text"]; ok {
span.Meta["error.msg"] = fmt.Sprintf("%s %s", httpcode, httptext)
} else {
@@ -719,3 +764,22 @@ func spanKindName(k ptrace.SpanKind) string {
}
return name
}
+
+// computeTopLevelAndMeasured updates the span's top-level and measured attributes.
+//
+// An OTLP span is considered top-level if it is a root span or has a span kind of server or consumer.
+// An OTLP span is marked as measured if it has a span kind of client or producer.
+func computeTopLevelAndMeasured(span *pb.Span, spanKind ptrace.SpanKind) {
+ if span.ParentID == 0 {
+ // span is a root span
+ traceutil.SetTopLevel(span, true)
+ }
+ if spanKind == ptrace.SpanKindServer || spanKind == ptrace.SpanKindConsumer {
+ // span is a server-side span
+ traceutil.SetTopLevel(span, true)
+ }
+ if spanKind == ptrace.SpanKindClient || spanKind == ptrace.SpanKindProducer {
+ // span is a client-side span, not top-level but we still want stats
+ traceutil.SetMeasured(span, true)
+ }
+}
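Note on the dual HTTP conventions handled in the hunk above: the receiver maps both the stable OTel names (http.request.method, http.response.status_code) and the legacy names (http.method, http.status_code) onto the Datadog keys, and the stable name wins whenever both are present. Below is a minimal, self-contained sketch of that precedence rule; mapHTTPConventions and the plain string map are illustrative stand-ins for the vendored convertSpan logic and pcommon.Map, not part of the package.

package main

import "fmt"

// mapHTTPConventions sketches the precedence applied in convertSpan:
// the stable OTel attribute overrides the legacy one when both are set,
// regardless of the order in which the attributes are visited.
func mapHTTPConventions(attrs map[string]string) map[string]string {
	meta := make(map[string]string)
	var gotMethodFromNewConv, gotStatusFromNewConv bool
	for k, v := range attrs {
		switch k {
		case "http.request.method": // stable convention (semconv >= v1.23)
			gotMethodFromNewConv = true
			meta["http.method"] = v
		case "http.method": // legacy convention, used only if the stable one was not seen
			if !gotMethodFromNewConv {
				meta["http.method"] = v
			}
		case "http.response.status_code": // stable convention
			gotStatusFromNewConv = true
			meta["http.status_code"] = v
		case "http.status_code": // legacy convention
			if !gotStatusFromNewConv {
				meta["http.status_code"] = v
			}
		}
	}
	return meta
}

func main() {
	attrs := map[string]string{"http.method": "GET", "http.request.method": "POST"}
	fmt.Println(mapHTTPConventions(attrs)["http.method"]) // always POST: the stable name wins
}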
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/version.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/version.go
index 793b22e564..c55f594acf 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/version.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/api/version.go
@@ -109,6 +109,15 @@ const (
//
v05 Version = "v0.5"
+ // V06 API
+ //
+ // Request: Stats Payload.
+ // Content-Type: application/msgpack
+ // Payload: ClientStatsPayload (pkg/proto/datadog/trace/stats.proto)
+ //
+ //
+ V06 Version = "v0.6"
+
// V07 API
//
// Request: Tracer Payload.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/client.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/client.go
index a4c675c9c4..b46de4fafb 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/client.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/client.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
+// Package config contains the configuration for the trace-agent.
package config
import (
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go
index 5344484505..a9ffdd1cea 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/config/config.go
@@ -310,6 +310,11 @@ type AgentConfig struct {
RareSamplerCooldownPeriod time.Duration
RareSamplerCardinality int
+ // Probabilistic Sampler configuration
+ ProbabilisticSamplerEnabled bool
+ ProbabilisticSamplerHashSeed uint32
+ ProbabilisticSamplerSamplingPercentage float32
+
// Receiver
ReceiverHost string
ReceiverPort int
@@ -348,8 +353,7 @@ type AgentConfig struct {
StatsdSocket string // for UDS Sockets
// logging
- LogFilePath string
- LogThrottling bool
+ LogFilePath string
// watchdog
MaxMemory float64 // MaxMemory is the threshold (bytes allocated) above which program panics and exits, to be restarted
@@ -501,7 +505,6 @@ func New() *AgentConfig {
StatsdPort: 8125,
StatsdEnabled: true,
- LogThrottling: true,
LambdaFunctionName: os.Getenv("AWS_LAMBDA_FUNCTION_NAME"),
MaxMemory: 5e8, // 500 Mb, should rarely go above 50 Mb
@@ -584,13 +587,13 @@ func (c *AgentConfig) NewHTTPTransport() *http.Transport {
return transport
}
-//nolint:revive // TODO(APM) Fix revive linter
+// HasFeature returns true if the agent has the given feature flag.
func (c *AgentConfig) HasFeature(feat string) bool {
_, ok := c.Features[feat]
return ok
}
-//nolint:revive // TODO(APM) Fix revive linter
+// AllFeatures returns a slice of all the feature flags the agent has.
func (c *AgentConfig) AllFeatures() []string {
feats := []string{}
for feat := range c.Features {
@@ -599,7 +602,6 @@ func (c *AgentConfig) AllFeatures() []string {
return feats
}
-//nolint:revive // TODO(APM) Fix revive linter
func inAzureAppServices() bool {
_, existsLinux := os.LookupEnv("APPSVC_RUN_ZIP")
_, existsWin := os.LookupEnv("WEBSITE_APPSERVICEAPPLOGS_TRACE_ENABLED")
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_legacy.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_legacy.go
index 6e25270a81..be75e5108a 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_legacy.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/event/extractor_legacy.go
@@ -34,9 +34,7 @@ func NewLegacyExtractor(rateByService map[string]float64) Extractor {
// span's service. In this case the extracted event is returned along with the found extraction rate and a true value.
// If this rate doesn't exist or the provided span is not a top level one, then no extraction is done and false is
// returned as the third value, with the others being invalid.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (e *legacyExtractor) Extract(s *pb.Span, priority sampler.SamplingPriority) (float64, bool) {
+func (e *legacyExtractor) Extract(s *pb.Span, _ sampler.SamplingPriority) (float64, bool) {
if !traceutil.HasTopLevel(s) {
return 0, false
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/blacklister.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/blacklister.go
index adc2e9217e..fe235e1fec 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/blacklister.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/blacklister.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
+// Package filters provides a way to filter out spans based on their properties.
package filters
import (
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/replacer.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/replacer.go
index 6dc3fb678f..2616b0c463 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/replacer.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/filters/replacer.go
@@ -8,6 +8,7 @@ package filters
import (
"regexp"
"strconv"
+ "strings"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/config"
@@ -24,6 +25,8 @@ func NewReplacer(rules []*config.ReplaceRule) *Replacer {
return &Replacer{rules: rules}
}
+const hiddenTagPrefix = "_"
+
// Replace replaces all tags matching the Replacer's rules.
func (f Replacer) Replace(trace pb.Trace) {
for _, rule := range f.rules {
@@ -32,10 +35,14 @@ func (f Replacer) Replace(trace pb.Trace) {
switch key {
case "*":
for k := range s.Meta {
- s.Meta[k] = re.ReplaceAllString(s.Meta[k], str)
+ if !strings.HasPrefix(k, hiddenTagPrefix) {
+ s.Meta[k] = re.ReplaceAllString(s.Meta[k], str)
+ }
}
for k := range s.Metrics {
- f.replaceNumericTag(re, s, k, str)
+ if !strings.HasPrefix(k, hiddenTagPrefix) {
+ f.replaceNumericTag(re, s, k, str)
+ }
}
s.Resource = re.ReplaceAllString(s.Resource, str)
case "resource.name":
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/endpoint.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/endpoint.go
index 8517766b00..03b4a10bcf 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/endpoint.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/endpoint.go
@@ -3,7 +3,6 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
package info
// EndpointStats contains stats about the volume of data written
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/info.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/info.go
index 82c3014bbc..4fa59a4499 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/info.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/info.go
@@ -3,6 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
+// Package info exposes internal information about the trace-agent.
package info
import (
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/stats.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/stats.go
index 742dbc3a10..289390aa3f 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/stats.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/info/stats.go
@@ -503,6 +503,7 @@ func (ts *TagStats) WarnString() string {
type Tags struct {
Lang, LangVersion, LangVendor, Interpreter, TracerVersion string
EndpointVersion string
+ Service string
}
// toArray will transform the Tags struct into a slice of string.
@@ -528,6 +529,9 @@ func (t *Tags) toArray() []string {
if t.EndpointVersion != "" {
tags = append(tags, "endpoint_version:"+t.EndpointVersion)
}
+ if t.Service != "" {
+ tags = append(tags, "service:"+t.Service)
+ }
return tags
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/buflogger.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/buflogger.go
index db6972f931..90672147d5 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/buflogger.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/buflogger.go
@@ -5,7 +5,6 @@
//go:build test
-//nolint:revive // TODO(APM) Fix revive linter
package log
import (
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/logger.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/logger.go
index 8edfdafa3c..552eeaa02f 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/logger.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/log/logger.go
@@ -3,6 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
+// Package log implements the trace-agent logger.
package log
import (
@@ -156,64 +157,40 @@ var NoopLogger = noopLogger{}
type noopLogger struct{}
// Trace implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Trace(v ...interface{}) {}
+func (noopLogger) Trace(_ ...interface{}) {}
// Tracef implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Tracef(format string, params ...interface{}) {}
+func (noopLogger) Tracef(_ string, _ ...interface{}) {}
// Debug implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Debug(v ...interface{}) {}
+func (noopLogger) Debug(_ ...interface{}) {}
// Debugf implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Debugf(format string, params ...interface{}) {}
+func (noopLogger) Debugf(_ string, _ ...interface{}) {}
// Info implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Info(v ...interface{}) {}
+func (noopLogger) Info(_ ...interface{}) {}
// Infof implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Infof(format string, params ...interface{}) {}
+func (noopLogger) Infof(_ string, _ ...interface{}) {}
// Warn implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Warn(v ...interface{}) error { return nil }
+func (noopLogger) Warn(_ ...interface{}) error { return nil }
// Warnf implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Warnf(format string, params ...interface{}) error { return nil }
+func (noopLogger) Warnf(_ string, _ ...interface{}) error { return nil }
// Error implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Error(v ...interface{}) error { return nil }
+func (noopLogger) Error(_ ...interface{}) error { return nil }
// Errorf implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Errorf(format string, params ...interface{}) error { return nil }
+func (noopLogger) Errorf(_ string, _ ...interface{}) error { return nil }
// Critical implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Critical(v ...interface{}) error { return nil }
+func (noopLogger) Critical(_ ...interface{}) error { return nil }
// Criticalf implements Logger.
-//
-//nolint:revive // TODO(APM) Fix revive linter
-func (noopLogger) Criticalf(format string, params ...interface{}) error { return nil }
+func (noopLogger) Criticalf(_ string, _ ...interface{}) error { return nil }
// Flush implements Logger.
func (noopLogger) Flush() {}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/remoteconfighandler/remote_config_handler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/remoteconfighandler/remote_config_handler.go
index 4c6c6d698d..19972ab25a 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/remoteconfighandler/remote_config_handler.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/remoteconfighandler/remote_config_handler.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
+// Package remoteconfighandler holds the logic responsible for updating the samplers when the remote configuration changes.
package remoteconfighandler
import (
@@ -46,7 +46,7 @@ type RemoteConfigHandler struct {
configSetEndpointFormatString string
}
-//nolint:revive // TODO(APM) Fix revive linter
+// New creates a new RemoteConfigHandler
func New(conf *config.AgentConfig, prioritySampler prioritySampler, rareSampler rareSampler, errorsSampler errorsSampler) *RemoteConfigHandler {
if conf.RemoteConfigClient == nil {
return nil
@@ -73,7 +73,7 @@ func New(conf *config.AgentConfig, prioritySampler prioritySampler, rareSampler
}
}
-//nolint:revive // TODO(APM) Fix revive linter
+// Start starts the remote config handler
func (h *RemoteConfigHandler) Start() {
if h == nil {
return
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go
index 3e42490d90..9753de6bf3 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/prioritysampler.go
@@ -76,12 +76,12 @@ func (s *PrioritySampler) Start() {
}()
}
-//nolint:revive // TODO(APM) Fix revive linter
+// UpdateTargetTPS updates the target tps
func (s *PrioritySampler) UpdateTargetTPS(targetTPS float64) {
s.sampler.updateTargetTPS(targetTPS)
}
-//nolint:revive // TODO(APM) Fix revive linter
+// GetTargetTPS returns the target tps
func (s *PrioritySampler) GetTargetTPS() float64 {
return s.sampler.targetTPS.Load()
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/probabilistic.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/probabilistic.go
new file mode 100644
index 0000000000..98c34ff208
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/probabilistic.go
@@ -0,0 +1,157 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2022-present Datadog, Inc.
+
+package sampler
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "hash/fnv"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
+ "github.com/DataDog/datadog-agent/pkg/trace/config"
+ "github.com/DataDog/datadog-agent/pkg/trace/log"
+ "github.com/DataDog/datadog-agent/pkg/trace/watchdog"
+
+ "go.uber.org/atomic"
+
+ "github.com/DataDog/datadog-go/v5/statsd"
+)
+
+const (
+ // These constants exist to match the behavior of the OTEL probabilistic sampler.
+ // See: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/6229c6ad1c49e9cc4b41a8aab8cb5a94a7b82ea5/processor/probabilisticsamplerprocessor/tracesprocessor.go#L38-L42
+ numProbabilisticBuckets = 0x4000
+ bitMaskHashBuckets = numProbabilisticBuckets - 1
+ percentageScaleFactor = numProbabilisticBuckets / 100.0
+)
+
+// ProbabilisticSampler is a sampler that overrides all other samplers;
+// it deterministically samples incoming traces by a hash of their trace ID
+type ProbabilisticSampler struct {
+ enabled bool
+ hashSeed []byte
+ scaledSamplingPercentage uint32
+
+ statsd statsd.ClientInterface
+ tracesSeen *atomic.Int64
+ tracesKept *atomic.Int64
+ tags []string
+
+ // start/stop synchronization
+ stopOnce sync.Once
+ stop chan struct{}
+ stopped chan struct{}
+}
+
+// NewProbabilisticSampler returns a new ProbabilisticSampler that deterministically samples
+// a given percentage of incoming spans based on their trace ID
+func NewProbabilisticSampler(conf *config.AgentConfig, statsd statsd.ClientInterface) *ProbabilisticSampler {
+ hashSeedBytes := make([]byte, 4)
+ binary.LittleEndian.PutUint32(hashSeedBytes, conf.ProbabilisticSamplerHashSeed)
+ return &ProbabilisticSampler{
+ enabled: conf.ProbabilisticSamplerEnabled,
+ hashSeed: hashSeedBytes,
+ scaledSamplingPercentage: uint32(conf.ProbabilisticSamplerSamplingPercentage * percentageScaleFactor),
+ statsd: statsd,
+ tracesSeen: atomic.NewInt64(0),
+ tracesKept: atomic.NewInt64(0),
+ tags: []string{"sampler:probabilistic"},
+ stop: make(chan struct{}),
+ stopped: make(chan struct{}),
+ }
+}
+
+// Start starts up the ProbabilisticSampler's support routine, which periodically sends stats.
+func (ps *ProbabilisticSampler) Start() {
+ if !ps.enabled {
+ close(ps.stopped)
+ return
+ }
+ go func() {
+ defer watchdog.LogOnPanic(ps.statsd)
+ statsTicker := time.NewTicker(10 * time.Second)
+ defer statsTicker.Stop()
+ for {
+ select {
+ case <-statsTicker.C:
+ ps.report()
+ case <-ps.stop:
+ ps.report()
+ close(ps.stopped)
+ return
+ }
+ }
+ }()
+
+}
+
+// Stop shuts down the ProbabilisticSampler's support routine.
+func (ps *ProbabilisticSampler) Stop() {
+ if !ps.enabled {
+ return
+ }
+ ps.stopOnce.Do(func() {
+ close(ps.stop)
+ <-ps.stopped
+ })
+}
+
+// Sample a trace given the chunk's root span, returns true if the trace should be kept
+func (ps *ProbabilisticSampler) Sample(root *trace.Span) bool {
+ if !ps.enabled {
+ return false
+ }
+ ps.tracesSeen.Add(1)
+
+ tid, err := get128BitTraceID(root)
+ if err != nil {
+ log.Errorf("Unable to probabilistically sample, failed to determine 128-bit trace ID from incoming span: %v", err)
+ return false
+ }
+
+ hasher := fnv.New32a()
+ _, _ = hasher.Write(ps.hashSeed)
+ _, _ = hasher.Write(tid)
+ hash := hasher.Sum32()
+ keep := hash&bitMaskHashBuckets < ps.scaledSamplingPercentage
+ if keep {
+ ps.tracesKept.Add(1)
+ }
+ return keep
+}
+
+func (ps *ProbabilisticSampler) report() {
+ seen := ps.tracesSeen.Swap(0)
+ kept := ps.tracesKept.Swap(0)
+ _ = ps.statsd.Count("datadog.trace_agent.sampler.kept", kept, ps.tags, 1)
+ _ = ps.statsd.Count("datadog.trace_agent.sampler.seen", seen, ps.tags, 1)
+}
+
+func get128BitTraceID(span *trace.Span) ([]byte, error) {
+ // If it's an otel span the whole trace ID is in otel.trace_id
+ if tid, ok := span.Meta["otel.trace_id"]; ok {
+ bs, err := hex.DecodeString(tid)
+ if err != nil {
+ return nil, err
+ }
+ return bs, nil
+ }
+ tid := make([]byte, 16)
+ binary.BigEndian.PutUint64(tid[8:], span.TraceID)
+ // Get hex encoded upper bits for datadog spans
+ // If no value is found we can use the default `0` value as that's what will have been propagated
+ if upper, ok := span.Meta["_dd.p.tid"]; ok {
+ u, err := strconv.ParseUint(upper, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+ binary.BigEndian.PutUint64(tid[:8], u)
+ }
+ return tid, nil
+}
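Note on the sampling decision above: it reduces to an FNV-1a hash of the seed followed by the 128-bit trace ID, masked to the low 14 bits and compared against the scaled sampling percentage, matching the OTel probabilistic sampler's bucket scheme. Below is a stdlib-only sketch of that arithmetic; keepTrace is a stand-in for the vendored Sample method.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

const (
	numProbabilisticBuckets = 0x4000                          // 16384 buckets
	bitMaskHashBuckets      = numProbabilisticBuckets - 1     // keep the low 14 bits
	percentageScaleFactor   = numProbabilisticBuckets / 100.0 // buckets per percent
)

// keepTrace reproduces the bucket test: hash(seed || traceID) & mask < percentage * scale.
func keepTrace(hashSeed uint32, traceID [16]byte, samplingPercentage float32) bool {
	seed := make([]byte, 4)
	binary.LittleEndian.PutUint32(seed, hashSeed)
	h := fnv.New32a()
	_, _ = h.Write(seed)
	_, _ = h.Write(traceID[:])
	scaled := uint32(samplingPercentage * percentageScaleFactor)
	return h.Sum32()&bitMaskHashBuckets < scaled
}

func main() {
	var tid [16]byte
	binary.BigEndian.PutUint64(tid[8:], 0x1234567890abcdef) // lower 64 bits, as for a Datadog span
	fmt.Println(keepTrace(0, tid, 50.0))                    // kept for roughly half of all trace IDs
}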
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go
index b7a9933618..1b0907e782 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/rare_sampler.go
@@ -20,8 +20,6 @@ import (
)
const (
- // priorityTTL allows to blacklist p1 spans that are sampled entirely, for this period.
- priorityTTL = 10 * time.Minute
// ttlRenewalPeriod specifies the frequency at which we will upload cached entries.
ttlRenewalPeriod = 1 * time.Minute
// rareSamplerBurst sizes the token store used by the rate limiter.
@@ -45,7 +43,6 @@ type RareSampler struct {
tickStats *time.Ticker
limiter *rate.Limiter
ttl time.Duration
- priorityTTL time.Duration
cardinality int
seen map[Signature]*seenSpans
statsd statsd.ClientInterface
@@ -61,15 +58,12 @@ func NewRareSampler(conf *config.AgentConfig, statsd statsd.ClientInterface) *Ra
shrinks: atomic.NewInt64(0),
limiter: rate.NewLimiter(rate.Limit(conf.RareSamplerTPS), rareSamplerBurst),
ttl: conf.RareSamplerCooldownPeriod,
- priorityTTL: priorityTTL,
cardinality: conf.RareSamplerCardinality,
seen: make(map[Signature]*seenSpans),
tickStats: time.NewTicker(10 * time.Second),
statsd: statsd,
}
- if e.ttl > e.priorityTTL {
- e.priorityTTL = e.ttl
- }
+
go func() {
for range e.tickStats.C {
e.report()
@@ -84,11 +78,6 @@ func (e *RareSampler) Sample(now time.Time, t *pb.TraceChunk, env string) bool {
if !e.enabled.Load() {
return false
}
-
- if priority, ok := GetSamplingPriority(t); priority > 0 && ok {
- e.handlePriorityTrace(now, env, t, e.priorityTTL)
- return false
- }
return e.handleTrace(now, env, t)
}
@@ -97,12 +86,12 @@ func (e *RareSampler) Stop() {
e.tickStats.Stop()
}
-//nolint:revive // TODO(APM) Fix revive linter
+// SetEnabled marks the sampler as enabled or disabled
func (e *RareSampler) SetEnabled(enabled bool) {
e.enabled.Store(enabled)
}
-//nolint:revive // TODO(APM) Fix revive linter
+// IsEnabled returns whether the sampler is enabled
func (e *RareSampler) IsEnabled() bool {
return e.enabled.Load()
}
@@ -127,6 +116,7 @@ func (e *RareSampler) handleTrace(now time.Time, env string, t *pb.TraceChunk) b
break
}
}
+
if sampled {
e.handlePriorityTrace(now, env, t, e.ttl)
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go
index 1a83a72d20..8a0b744f81 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/sampler/scoresampler.go
@@ -74,12 +74,12 @@ func (s *ScoreSampler) Sample(now time.Time, trace pb.Trace, root *pb.Span, env
return s.applySampleRate(root, rate)
}
-//nolint:revive // TODO(APM) Fix revive linter
+// UpdateTargetTPS updates the target tps
func (s *ScoreSampler) UpdateTargetTPS(targetTPS float64) {
s.Sampler.updateTargetTPS(targetTPS)
}
-//nolint:revive // TODO(APM) Fix revive linter
+// GetTargetTPS returns the target tps
func (s *ScoreSampler) GetTargetTPS() float64 {
return s.Sampler.targetTPS.Load()
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go
index 543fa9bf15..9fc8f696ae 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/aggregation.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
+// Package stats contains the logic to process APM stats.
package stats
import (
@@ -39,6 +39,7 @@ type BucketsAggregationKey struct {
StatusCode uint32
Synthetics bool
PeerTagsHash uint64
+ IsTraceRoot pb.TraceRootFlag
}
// PayloadAggregationKey specifies the key by which a payload is aggregated.
@@ -77,16 +78,23 @@ func clientOrProducer(spanKind string) bool {
// NewAggregationFromSpan creates a new aggregation from the provided span and env
func NewAggregationFromSpan(s *pb.Span, origin string, aggKey PayloadAggregationKey, enablePeerTagsAgg bool, peerTagKeys []string) (Aggregation, []string) {
synthetics := strings.HasPrefix(origin, tagSynthetics)
+ var isTraceRoot pb.TraceRootFlag
+ if s.ParentID == 0 {
+ isTraceRoot = pb.TraceRootFlag_TRUE
+ } else {
+ isTraceRoot = pb.TraceRootFlag_FALSE
+ }
agg := Aggregation{
PayloadAggregationKey: aggKey,
BucketsAggregationKey: BucketsAggregationKey{
- Resource: s.Resource,
- Service: s.Service,
- Name: s.Name,
- SpanKind: s.Meta[tagSpanKind],
- Type: s.Type,
- StatusCode: getStatusCode(s),
- Synthetics: synthetics,
+ Resource: s.Resource,
+ Service: s.Service,
+ Name: s.Name,
+ SpanKind: s.Meta[tagSpanKind],
+ Type: s.Type,
+ StatusCode: getStatusCode(s),
+ Synthetics: synthetics,
+ IsTraceRoot: isTraceRoot,
},
}
var peerTags []string
@@ -138,6 +146,7 @@ func NewAggregationFromGroup(g *pb.ClientGroupedStats) Aggregation {
StatusCode: g.HTTPStatusCode,
Synthetics: g.Synthetics,
PeerTagsHash: peerTagsHash(g.PeerTags),
+ IsTraceRoot: g.IsTraceRoot,
},
}
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go
index ef84920187..f1e6f0672c 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/client_stats_aggregator.go
@@ -6,9 +6,10 @@
package stats
import (
- "github.com/DataDog/datadog-agent/pkg/trace/version"
"time"
+ "github.com/DataDog/datadog-agent/pkg/trace/version"
+
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/config"
"github.com/DataDog/datadog-agent/pkg/trace/log"
@@ -294,6 +295,7 @@ func (b *bucket) aggregationToPayloads() []*pb.ClientStatsPayload {
HTTPStatusCode: aggrKey.StatusCode,
Type: aggrKey.Type,
Synthetics: aggrKey.Synthetics,
+ IsTraceRoot: aggrKey.IsTraceRoot,
PeerTags: counts.peerTags,
Hits: counts.hits,
Errors: counts.errors,
@@ -332,13 +334,14 @@ func newPayloadAggregationKey(env, hostname, version, cid string, gitCommitSha s
func newBucketAggregationKey(b *pb.ClientGroupedStats, enablePeerTagsAgg bool) BucketsAggregationKey {
k := BucketsAggregationKey{
- Service: b.Service,
- Name: b.Name,
- SpanKind: b.SpanKind,
- Resource: b.Resource,
- Type: b.Type,
- Synthetics: b.Synthetics,
- StatusCode: b.HTTPStatusCode,
+ Service: b.Service,
+ Name: b.Name,
+ SpanKind: b.SpanKind,
+ Resource: b.Resource,
+ Type: b.Type,
+ Synthetics: b.Synthetics,
+ StatusCode: b.HTTPStatusCode,
+ IsTraceRoot: b.IsTraceRoot,
}
if enablePeerTagsAgg {
k.PeerTagsHash = peerTagsHash(b.GetPeerTags())
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go
index 8e8d7e8958..d1407dfb8d 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/stats/statsraw.go
@@ -76,6 +76,7 @@ func (s *groupedStats) export(a Aggregation) (*pb.ClientGroupedStats, error) {
Synthetics: a.Synthetics,
SpanKind: a.SpanKind,
PeerTags: s.peerTags,
+ IsTraceRoot: a.IsTraceRoot,
}, nil
}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go
index 3e7834883c..111ecc6689 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/azure.go
@@ -40,8 +40,6 @@ const (
// GetAppServicesTags returns the env vars pulled from the Azure App Service instance.
// In some cases we will need to add extra tags for function apps.
-//
-//nolint:revive // TODO(APM) Fix revive linter
func GetAppServicesTags() map[string]string {
siteName := os.Getenv("WEBSITE_SITE_NAME")
ownerName := os.Getenv("WEBSITE_OWNER_NAME")
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize.go
index 592f596b03..0cda4398d7 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/normalize.go
@@ -25,6 +25,8 @@ const (
MaxNameLen = 100
// MaxServiceLen the maximum length a service can have
MaxServiceLen = 100
+ // MaxResourceLen the maximum length a resource can have
+ MaxResourceLen = 5000
)
var (
@@ -268,8 +270,8 @@ func normMetricNameParse(name string) (string, bool) {
res := make([]byte, 0, len(name))
// skip non-alphabetic characters
- //nolint:revive // TODO(APM) Fix revive linter
for ; i < len(name) && !isAlpha(name[i]); i++ {
+ continue
}
// if there were no alphabetic characters it wasn't valid
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/otel_util.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/otel_util.go
new file mode 100644
index 0000000000..67a2dd9b0b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/otel_util.go
@@ -0,0 +1,283 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package traceutil
+
+import (
+ "context"
+ "strings"
+
+ "github.com/DataDog/datadog-agent/pkg/trace/log"
+ "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
+ "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/ptrace"
+ semconv117 "go.opentelemetry.io/collector/semconv/v1.17.0"
+ semconv "go.opentelemetry.io/collector/semconv/v1.6.1"
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Util functions for converting OTel semantics to DD semantics.
+// TODO(OTEL-1726): reuse the same mapping code for ReceiveResourceSpans and Concentrator
+
+var (
+ // SignalTypeSet is the OTel attribute set for traces.
+ SignalTypeSet = attribute.NewSet(attribute.String("signal", "traces"))
+)
+
+// IndexOTelSpans iterates over the input OTel spans and returns 3 maps:
+// OTel spans indexed by span ID, OTel resources indexed by span ID, OTel instrumentation scopes indexed by span ID.
+// Skips spans with invalid trace ID or span ID. If there are multiple spans with the same (non-zero) span ID, the last one wins.
+func IndexOTelSpans(traces ptrace.Traces) (map[pcommon.SpanID]ptrace.Span, map[pcommon.SpanID]pcommon.Resource, map[pcommon.SpanID]pcommon.InstrumentationScope) {
+ spanByID := make(map[pcommon.SpanID]ptrace.Span)
+ resByID := make(map[pcommon.SpanID]pcommon.Resource)
+ scopeByID := make(map[pcommon.SpanID]pcommon.InstrumentationScope)
+ rspanss := traces.ResourceSpans()
+ for i := 0; i < rspanss.Len(); i++ {
+ rspans := rspanss.At(i)
+ res := rspans.Resource()
+ for j := 0; j < rspans.ScopeSpans().Len(); j++ {
+ libspans := rspans.ScopeSpans().At(j)
+ for k := 0; k < libspans.Spans().Len(); k++ {
+ span := libspans.Spans().At(k)
+ if span.TraceID().IsEmpty() || span.SpanID().IsEmpty() {
+ continue
+ }
+ spanByID[span.SpanID()] = span
+ resByID[span.SpanID()] = res
+ scopeByID[span.SpanID()] = libspans.Scope()
+ }
+ }
+ }
+ return spanByID, resByID, scopeByID
+}
+
+// GetTopLevelOTelSpans returns the span IDs of the top level OTel spans.
+func GetTopLevelOTelSpans(spanByID map[pcommon.SpanID]ptrace.Span, resByID map[pcommon.SpanID]pcommon.Resource, topLevelByKind bool) map[pcommon.SpanID]struct{} {
+ topLevelSpans := make(map[pcommon.SpanID]struct{})
+ for spanID, span := range spanByID {
+ if span.ParentSpanID().IsEmpty() {
+ // case 1: root span
+ topLevelSpans[spanID] = struct{}{}
+ continue
+ }
+
+ if topLevelByKind {
+ // New behavior for computing top level OTel spans, see computeTopLevelAndMeasured in pkg/trace/api/otlp.go
+ spanKind := span.Kind()
+ if spanKind == ptrace.SpanKindServer || spanKind == ptrace.SpanKindConsumer {
+ // span is a server-side span, mark as top level
+ topLevelSpans[spanID] = struct{}{}
+ }
+ continue
+ }
+
+ // Otherwise, fall back to old behavior in ComputeTopLevel
+ parentSpan, ok := spanByID[span.ParentSpanID()]
+ if !ok {
+ // case 2: parent span not in the same chunk, presumably it belongs to another service
+ topLevelSpans[spanID] = struct{}{}
+ continue
+ }
+
+ svc := GetOTelService(span, resByID[spanID], true)
+ parentSvc := GetOTelService(parentSpan, resByID[parentSpan.SpanID()], true)
+ if svc != parentSvc {
+ // case 3: parent is not in the same service
+ topLevelSpans[spanID] = struct{}{}
+ }
+ }
+ return topLevelSpans
+}
+
+// GetOTelAttrVal returns the matched value as a string in the input map with the given keys.
+// If there are multiple keys present, the first matched one is returned.
+// If normalize is true, normalize the return value with NormalizeTagValue.
+func GetOTelAttrVal(attrs pcommon.Map, normalize bool, keys ...string) string {
+ val := ""
+ for _, key := range keys {
+ attrval, exists := attrs.Get(key)
+ if exists {
+ val = attrval.AsString()
+ }
+ }
+
+ if normalize {
+ val = NormalizeTagValue(val)
+ }
+
+ return val
+}
+
+// GetOTelAttrValInResAndSpanAttrs returns the matched value as a string in the OTel resource attributes and span attributes with the given keys.
+// If there are multiple keys present, the first matched one is returned.
+// If the key is present in both resource attributes and span attributes, resource attributes take precedence.
+// If normalize is true, normalize the return value with NormalizeTagValue.
+func GetOTelAttrValInResAndSpanAttrs(span ptrace.Span, res pcommon.Resource, normalize bool, keys ...string) string {
+ if val := GetOTelAttrVal(res.Attributes(), normalize, keys...); val != "" {
+ return val
+ }
+ return GetOTelAttrVal(span.Attributes(), normalize, keys...)
+}
+
+// GetOTelSpanType returns the DD span type based on OTel span kind and attributes.
+func GetOTelSpanType(span ptrace.Span, res pcommon.Resource) string {
+ var typ string
+ switch span.Kind() {
+ case ptrace.SpanKindServer:
+ typ = "web"
+ case ptrace.SpanKindClient:
+ db := GetOTelAttrValInResAndSpanAttrs(span, res, true, semconv.AttributeDBSystem)
+ if db == "redis" || db == "memcached" {
+ typ = "cache"
+ } else if db != "" {
+ typ = "db"
+ } else {
+ typ = "http"
+ }
+ default:
+ typ = "custom"
+ }
+ return typ
+}
+
+// GetOTelService returns the DD service name based on OTel span and resource attributes.
+func GetOTelService(span ptrace.Span, res pcommon.Resource, normalize bool) string {
+ // No need to normalize with NormalizeTagValue since we will do NormalizeService later
+ svc := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeServiceName)
+ if svc == "" {
+ svc = "otlpresourcenoservicename"
+ }
+ if normalize {
+ newsvc, err := NormalizeService(svc, "")
+ switch err {
+ case ErrTooLong:
+ log.Debugf("Fixing malformed trace. Service is too long (reason:service_truncate), truncating span.service to length=%d: %s", MaxServiceLen, svc)
+ case ErrInvalid:
+ log.Debugf("Fixing malformed trace. Service is invalid (reason:service_invalid), replacing invalid span.service=%s with fallback span.service=%s", svc, newsvc)
+ }
+ svc = newsvc
+ }
+ return svc
+}
+
+// GetOTelResource returns the DD resource name based on OTel span and resource attributes.
+func GetOTelResource(span ptrace.Span, res pcommon.Resource) (resName string) {
+ resName = GetOTelAttrValInResAndSpanAttrs(span, res, false, "resource.name")
+ if resName == "" {
+ if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeHTTPMethod); m != "" {
+ // use the HTTP method + route (if available)
+ resName = m
+ if route := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeHTTPRoute); route != "" {
+ resName = resName + " " + route
+ }
+ } else if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingOperation); m != "" {
+ resName = m
+ // use the messaging operation
+ if dest := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingDestination, semconv117.AttributeMessagingDestinationName); dest != "" {
+ resName = resName + " " + dest
+ }
+ } else if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCMethod); m != "" {
+ resName = m
+ // use the RPC method
+ if svc := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCService); svc != "" {
+ // ...and service if available
+ resName = resName + " " + svc
+ }
+ } else {
+ resName = span.Name()
+ }
+ }
+ if len(resName) > MaxResourceLen {
+ resName = resName[:MaxResourceLen]
+ }
+ return
+}
+
+// GetOTelOperationName returns the DD operation name based on OTel span and resource attributes and given configs.
+func GetOTelOperationName(
+ span ptrace.Span,
+ res pcommon.Resource,
+ lib pcommon.InstrumentationScope,
+ spanNameAsResourceName bool,
+ spanNameRemappings map[string]string,
+ normalize bool) string {
+ // No need to normalize with NormalizeTagValue since we will do NormalizeName later
+ name := GetOTelAttrValInResAndSpanAttrs(span, res, false, "operation.name")
+ if name == "" {
+ if spanNameAsResourceName {
+ name = span.Name()
+ } else {
+ name = strings.ToLower(span.Kind().String())
+ if lib.Name() != "" {
+ name = lib.Name() + "." + name
+ } else {
+ name = "opentelemetry." + name
+ }
+ }
+ }
+ if v, ok := spanNameRemappings[name]; ok {
+ name = v
+ }
+
+ if normalize {
+ normalizeName, err := NormalizeName(name)
+ switch err {
+ case ErrEmpty:
+ log.Debugf("Fixing malformed trace. Name is empty (reason:span_name_empty), setting span.name=%s", normalizeName)
+ case ErrTooLong:
+ log.Debugf("Fixing malformed trace. Name is too long (reason:span_name_truncate), truncating span.name to length=%d", MaxServiceLen)
+ case ErrInvalid:
+ log.Debugf("Fixing malformed trace. Name is invalid (reason:span_name_invalid), setting span.name=%s", normalizeName)
+ }
+ name = normalizeName
+ }
+
+ return name
+}
+
+// GetOTelHostname returns the DD hostname based on OTel span and resource attributes.
+func GetOTelHostname(span ptrace.Span, res pcommon.Resource, tr *attributes.Translator, fallbackHost string) string {
+ ctx := context.Background()
+ src, srcok := tr.ResourceToSource(ctx, res, SignalTypeSet)
+ if !srcok {
+ if v := GetOTelAttrValInResAndSpanAttrs(span, res, false, "_dd.hostname"); v != "" {
+ src = source.Source{Kind: source.HostnameKind, Identifier: v}
+ srcok = true
+ }
+ }
+ if srcok {
+ switch src.Kind {
+ case source.HostnameKind:
+ return src.Identifier
+ default:
+ // We are not on a hostname (serverless), hence the hostname is empty
+ return ""
+ }
+ } else {
+ // fallback hostname from Agent conf.Hostname
+ return fallbackHost
+ }
+}
+
+// GetOTelStatusCode returns the DD status code of the OTel span.
+func GetOTelStatusCode(span ptrace.Span) uint32 {
+ if code, ok := span.Attributes().Get(semconv.AttributeHTTPStatusCode); ok {
+ return uint32(code.Int())
+ }
+ return 0
+}
+
+// GetOTelContainerTags returns a list of DD container tags in the OTel resource attributes.
+// Tags are always normalized.
+func GetOTelContainerTags(rattrs pcommon.Map) []string {
+ var containerTags []string
+ containerTagsMap := attributes.ContainerTagsFromResourceAttributes(rattrs)
+ for k, v := range containerTagsMap {
+ t := NormalizeTag(k + ":" + v)
+ containerTags = append(containerTags, t)
+ }
+ return containerTags
+}
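Note on GetOTelOperationName above: when no operation.name attribute is set, the name is derived from the instrumentation scope and the lower-cased span kind, falling back to an "opentelemetry." prefix when the scope has no name. Below is a compact sketch of that default path; fallbackOperationName is an illustrative helper, and remapping/normalization are assumed to happen afterwards as in the vendored code.

package main

import (
	"fmt"
	"strings"
)

// fallbackOperationName mirrors the default naming path in GetOTelOperationName:
// "<scope>.<kind>" when the scope has a name, otherwise "opentelemetry.<kind>".
func fallbackOperationName(scopeName, spanKind, spanName string, spanNameAsResourceName bool) string {
	if spanNameAsResourceName {
		return spanName
	}
	kind := strings.ToLower(spanKind)
	if scopeName != "" {
		return scopeName + "." + kind
	}
	return "opentelemetry." + kind
}

func main() {
	fmt.Println(fallbackOperationName("io.opentelemetry.http", "Server", "GET /users", false)) // io.opentelemetry.http.server
	fmt.Println(fallbackOperationName("", "Client", "GET", false))                             // opentelemetry.client
}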
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go
index a4f2c153d7..913e412764 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/processed_trace.go
@@ -5,7 +5,9 @@
package traceutil
-import pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
+import (
+ pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
+)
// ProcessedTrace represents a trace being processed in the agent.
type ProcessedTrace struct {
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go
index b3a00834f7..c889179288 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/traceutil/span.go
@@ -64,6 +64,20 @@ func SetTopLevel(s *pb.Span, topLevel bool) {
SetMetric(s, topLevelKey, 1)
}
+// SetMeasured sets the measured attribute of the span.
+func SetMeasured(s *pb.Span, measured bool) {
+ if !measured {
+ if s.Metrics == nil {
+ return
+ }
+ delete(s.Metrics, measuredKey)
+ return
+ }
+ // Setting the metrics value, so that code downstream in the pipeline
+ // can identify this as measured without recomputing everything.
+ SetMetric(s, measuredKey, 1)
+}
+
// SetMetric sets the metric at key to the val on the span s.
func SetMetric(s *pb.Span, key string, val float64) {
if s.Metrics == nil {
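Note on SetMeasured above: like SetTopLevel, it records the decision as a metric on the span so downstream stats code does not recompute it. Below is a toy sketch of how computeTopLevelAndMeasured (from the otlp.go hunk earlier) maps span kind onto those two flags; the span type and metric keys here are simplified stand-ins, not the vendored pb.Span or its constants.

package main

import "fmt"

// sketchSpan is a stand-in for pb.Span: only the fields the rule needs.
type sketchSpan struct {
	ParentID uint64
	Metrics  map[string]float64
}

// markTopLevelAndMeasured applies the rule from computeTopLevelAndMeasured:
// root, server and consumer spans are top-level; client and producer spans are measured.
func markTopLevelAndMeasured(s *sketchSpan, kind string) {
	if s.Metrics == nil {
		s.Metrics = make(map[string]float64)
	}
	if s.ParentID == 0 || kind == "server" || kind == "consumer" {
		s.Metrics["top_level"] = 1 // hypothetical key; the vendored code uses its own constant
	}
	if kind == "client" || kind == "producer" {
		s.Metrics["measured"] = 1 // hypothetical key
	}
}

func main() {
	s := &sketchSpan{ParentID: 42}
	markTopLevelAndMeasured(s, "consumer")
	fmt.Println(s.Metrics) // map[top_level:1]
}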
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu.go
index 6a0c248cbb..a1799701ab 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu.go
@@ -5,7 +5,7 @@
//go:build !windows && !aix
-//nolint:revive // TODO(APM) Fix revive linter
+// Package watchdog monitors the trace-agent resource usage.
package watchdog
import (
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_windows.go
index 6a9449bdac..6613e30c51 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_windows.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/watchdog/cpu_windows.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
+// Package watchdog monitors the trace-agent resource usage.
package watchdog
import (
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/sender.go b/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/sender.go
index 6ed4bd5510..6df1131369 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/sender.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/trace/writer/sender.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
-//nolint:revive // TODO(APM) Fix revive linter
+// Package writer contains the logic for sending payloads to the Datadog intake.
package writer
import (
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/backoff/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/backoff/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/backoff/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/backoff/backoff.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/backoff/backoff.go
new file mode 100644
index 0000000000..87870e0d3e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/backoff/backoff.go
@@ -0,0 +1,99 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package backoff provides backoff mechanisms
+package backoff
+
+import (
+ "math"
+ "math/rand"
+ "time"
+)
+
+// ExpBackoffPolicy contains parameters and logic necessary to implement an exponential backoff
+// strategy when handling errors.
+type ExpBackoffPolicy struct {
+ // MinBackoffFactor controls the overlap between consecutive retry interval ranges. When
+ // set to `2`, there is a guarantee that there will be no overlap. The overlap
+ // will asymptotically approach 50% the higher the value is set.
+ MinBackoffFactor float64
+
+ // BaseBackoffTime controls the rate of exponential growth. Also, you can calculate the start
+ // of the very first retry interval range by evaluating the following expression:
+ // baseBackoffTime / minBackoffFactor * 2
+ BaseBackoffTime float64
+
+ // MaxBackoffTime is the maximum number of seconds to wait for a retry.
+ MaxBackoffTime float64
+
+ // RecoveryInterval controls how many retry interval ranges to step down for an endpoint
+ // upon success. Increasing this should only be considered when maxBackoffTime
+ // is particularly high or if our intake team is particularly confident.
+ RecoveryInterval int
+
+ // MaxErrors is a derived value: the number of errors it takes to reach MaxBackoffTime.
+ MaxErrors int
+}
+
+const secondsFloat = float64(time.Second)
+
+func randomBetween(min, max float64) float64 {
+ return rand.Float64()*(max-min) + min
+}
+
+// NewExpBackoffPolicy constructs a new exponential backoff Policy with the given parameters
+func NewExpBackoffPolicy(minBackoffFactor, baseBackoffTime, maxBackoffTime float64, recoveryInterval int, recoveryReset bool) Policy {
+ maxErrors := int(math.Floor(math.Log2(maxBackoffTime/baseBackoffTime))) + 1
+
+ if recoveryReset {
+ recoveryInterval = maxErrors
+ }
+
+ return &ExpBackoffPolicy{
+ MinBackoffFactor: minBackoffFactor,
+ BaseBackoffTime: baseBackoffTime,
+ MaxBackoffTime: maxBackoffTime,
+ RecoveryInterval: recoveryInterval,
+ MaxErrors: maxErrors,
+ }
+}
+
+// GetBackoffDuration returns the amount of time to sleep after numErrors consecutive errors
+func (e *ExpBackoffPolicy) GetBackoffDuration(numErrors int) time.Duration {
+ var backoffTime float64
+
+ if numErrors > 0 {
+ backoffTime = e.BaseBackoffTime * math.Pow(2, float64(numErrors))
+
+ if backoffTime > e.MaxBackoffTime {
+ backoffTime = e.MaxBackoffTime
+ } else {
+ min := backoffTime / e.MinBackoffFactor
+ max := math.Min(e.MaxBackoffTime, backoffTime)
+ backoffTime = randomBetween(min, max)
+ }
+ }
+
+ return time.Duration(backoffTime * secondsFloat)
+
+}
+
+// IncError increments the error counter up to MaxErrors
+func (e *ExpBackoffPolicy) IncError(numErrors int) int {
+ numErrors++
+ if numErrors > e.MaxErrors {
+ return e.MaxErrors
+ }
+ return numErrors
+}
+
+// DecError decrements the error counter by RecoveryInterval, stopping at zero
+func (e *ExpBackoffPolicy) DecError(numErrors int) int {
+ numErrors -= e.RecoveryInterval
+ if numErrors < 0 {
+ return 0
+ }
+ return numErrors
+}
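
The file above implements the exponential backoff strategy; the caller owns the
error counter and feeds it back into the policy on every attempt. A minimal usage
sketch follows (the retry loop, parameter values, and the sendOnce stand-in are
illustrative; only the backoff API comes from the vendored file, and the canonical
github.com import path is assumed):

    package main

    import (
        "errors"
        "fmt"
        "time"

        "github.com/DataDog/datadog-agent/pkg/util/backoff"
    )

    // sendOnce is a stand-in for whatever operation needs retrying.
    func sendOnce() error { return errors.New("intake unavailable") }

    func main() {
        // minBackoffFactor=2, baseBackoffTime=2s, maxBackoffTime=64s,
        // recoveryInterval=2, recoveryReset=false.
        policy := backoff.NewExpBackoffPolicy(2, 2, 64, 2, false)

        numErrors := 0
        for attempt := 1; attempt <= 3; attempt++ {
            if err := sendOnce(); err != nil {
                numErrors = policy.IncError(numErrors)
            } else {
                numErrors = policy.DecError(numErrors)
            }
            wait := policy.GetBackoffDuration(numErrors)
            fmt.Printf("attempt %d: errors=%d, sleeping %s\n", attempt, numErrors, wait)
            time.Sleep(wait)
        }
    }
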
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/backoff/policy.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/backoff/policy.go
new file mode 100644
index 0000000000..a865dc3094
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/backoff/policy.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package backoff
+
+import "time"
+
+// Policy is the common interface for all backoff policies
+type Policy interface {
+ // GetBackoffDuration returns the backoff duration for the given number of errors
+ GetBackoffDuration(numErrors int) time.Duration
+ // IncError increments the number of errors and returns the new value
+ IncError(numErrors int) int
+ // DecError decrements the number of errors and returns the new value
+ DecError(numErrors int) int
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/cgroups/reader.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/cgroups/reader.go
index dd2255b0bc..1778a4a560 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/util/cgroups/reader.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/cgroups/reader.go
@@ -19,9 +19,9 @@ import (
const (
// ContainerRegexpStr defines the regexp used to match container IDs
// ([0-9a-f]{64}) is standard container id used pretty much everywhere
- // ([0-9a-f]{32}-[0-9]{10}) is container id used by AWS ECS
+ // ([0-9a-f]{32}-\d+) is container id used by AWS ECS
// ([0-9a-f]{8}(-[0-9a-f]{4}){4}$) is container id used by Garden
- ContainerRegexpStr = "([0-9a-f]{64})|([0-9a-f]{32}-[0-9]{10})|([0-9a-f]{8}(-[0-9a-f]{4}){4}$)"
+ ContainerRegexpStr = "([0-9a-f]{64})|([0-9a-f]{32}-\\d+)|([0-9a-f]{8}(-[0-9a-f]{4}){4}$)"
)
// Reader is the main interface to scrape data from cgroups
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/executable/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/executable/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/executable/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/executable/executable.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/executable/executable.go
new file mode 100644
index 0000000000..6c9e9d6a82
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/executable/executable.go
@@ -0,0 +1,74 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package executable provides information on the executable that started the process
+// and utils to find other executables on the system
+package executable
+
+import (
+ "os/exec"
+ "path/filepath"
+
+ // TODO: Use the built-in "os" package as soon as it implements `Executable()`
+ // consistently across all platforms
+ "github.com/kardianos/osext"
+)
+
+func path(allowSymlinkFailure bool) (string, error) {
+ here, err := osext.Executable()
+ if err != nil {
+ return "", err
+ }
+ retstring, err := filepath.EvalSymlinks(here)
+ if err != nil {
+ if allowSymlinkFailure {
+ // return no error here, since we're allowing the symlink to fail
+ return here, nil
+ }
+ }
+ return retstring, err
+
+}
+
+// Folder returns the folder under which the executable is located,
+// after having resolved all symlinks to the executable.
+// Unlike os.Executable and osext.ExecutableFolder, Folder will
+// resolve the symlinks across all platforms.
+func Folder() (string, error) {
+ p, err := path(false)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Dir(p), nil
+}
+
+// FolderAllowSymlinkFailure returns the folder under which the executable
+// is located; if resolving symbolic links fails, the unresolved path is returned.
+func FolderAllowSymlinkFailure() (string, error) {
+ p, err := path(true)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Dir(p), nil
+}
+
+// ResolvePath resolves the absolute path to the executable program
+// with the given name in the argument. Returns error if the program's
+// path cannot be resolved.
+func ResolvePath(execName string) (string, error) {
+ execPath, err := exec.LookPath(execName)
+ if err != nil {
+ return "", err
+ }
+
+ execAbsPath, err := filepath.Abs(execPath)
+ if err != nil {
+ return "", err
+ }
+
+ return execAbsPath, nil
+}
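
A short sketch of how the three exported helpers of the executable package might be
used together (the looked-up program name and the printed labels are illustrative;
the canonical github.com import path is assumed):

    package main

    import (
        "fmt"
        "log"

        "github.com/DataDog/datadog-agent/pkg/util/executable"
    )

    func main() {
        // Directory containing the current binary, with symlinks fully resolved.
        dir, err := executable.Folder()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("running from:", dir)

        // Absolute path of another program found on PATH.
        gitPath, err := executable.ResolvePath("git")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("git resolved to:", gitPath)
    }
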
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/common.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/common.go
new file mode 100644
index 0000000000..2962ba65b6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/common.go
@@ -0,0 +1,33 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package filesystem
+
+import (
+ "os"
+ "time"
+)
+
+// GetFileSize gets the file size
+func GetFileSize(path string) (int64, error) {
+ stat, err := os.Stat(path)
+
+ if err != nil {
+ return 0, err
+ }
+
+ return stat.Size(), nil
+}
+
+// GetFileModTime gets the modification time
+func GetFileModTime(path string) (time.Time, error) {
+ stat, err := os.Stat(path)
+
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ return stat.ModTime(), nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/disk.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/disk.go
new file mode 100644
index 0000000000..a80165d0ca
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/disk.go
@@ -0,0 +1,30 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+//go:build !windows
+
+package filesystem
+
+import "github.com/shirou/gopsutil/v3/disk"
+
+// Disk gets information about the disk
+type Disk struct{}
+
+// NewDisk creates a new instance of Disk
+func NewDisk() Disk {
+ return Disk{}
+}
+
+// GetUsage gets the disk usage
+func (Disk) GetUsage(path string) (*DiskUsage, error) {
+ usage, err := disk.Usage(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &DiskUsage{
+ Total: usage.Total,
+ Available: usage.Free,
+ }, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/disk_usage.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/disk_usage.go
new file mode 100644
index 0000000000..b93ed4a860
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/disk_usage.go
@@ -0,0 +1,13 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package filesystem provides functions and types to interact with the filesystem
+package filesystem
+
+// DiskUsage is the disk usage
+type DiskUsage struct {
+ Total uint64
+ Available uint64
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/disk_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/disk_windows.go
new file mode 100644
index 0000000000..9a6c247d50
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/disk_windows.go
@@ -0,0 +1,53 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+//go:build windows
+
+package filesystem
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+// Disk gets information about the disk
+type Disk struct {
+ procGetDiskFreeSpaceExW *windows.LazyProc
+}
+
+// NewDisk creates a new instance of Disk
+func NewDisk() Disk {
+ modkernel32 := windows.NewLazyDLL("kernel32.dll")
+ return Disk{
+ procGetDiskFreeSpaceExW: modkernel32.NewProc("GetDiskFreeSpaceExW"),
+ }
+}
+
+// GetUsage gets the disk usage
+func (d Disk) GetUsage(path string) (*DiskUsage, error) {
+ free := uint64(0)
+ total := uint64(0)
+
+ winPath, err := windows.UTF16PtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+
+ ret, _, err := d.procGetDiskFreeSpaceExW.Call(
+ uintptr(unsafe.Pointer(winPath)),
+ uintptr(unsafe.Pointer(&free)),
+ uintptr(unsafe.Pointer(&total)),
+ uintptr(unsafe.Pointer(nil)),
+ )
+
+ if ret == 0 {
+ return nil, err
+ }
+
+ return &DiskUsage{
+ Total: total,
+ Available: free,
+ }, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/file.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/file.go
new file mode 100644
index 0000000000..b08a5b0cff
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/file.go
@@ -0,0 +1,33 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package filesystem
+
+import (
+ "bufio"
+ "os"
+)
+
+// FileExists returns true if a file exists and is accessible, false otherwise
+func FileExists(path string) bool {
+ _, err := os.Stat(path)
+ return err == nil
+}
+
+// ReadLines reads a file line by line
+func ReadLines(filename string) ([]string, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return []string{""}, err
+ }
+ defer f.Close()
+
+ var ret []string
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ ret = append(ret, scanner.Text())
+ }
+ return ret, scanner.Err()
+}
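
Taken together, the filesystem helpers added so far (GetFileSize, GetFileModTime,
Disk.GetUsage, FileExists, ReadLines) cover the common stat-and-read cases. A hedged
usage sketch; the paths are illustrative and the canonical github.com import path is
assumed:

    package main

    import (
        "fmt"
        "log"

        "github.com/DataDog/datadog-agent/pkg/util/filesystem"
    )

    func main() {
        path := "/etc/hosts" // illustrative path

        if !filesystem.FileExists(path) {
            log.Fatalf("%s does not exist", path)
        }

        size, err := filesystem.GetFileSize(path)
        if err != nil {
            log.Fatal(err)
        }
        lines, err := filesystem.ReadLines(path)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s: %d bytes, %d lines\n", path, size, len(lines))

        // Disk usage for the volume holding "/" (platform-specific under the hood).
        usage, err := filesystem.NewDisk().GetUsage("/")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("disk: %d of %d bytes available\n", usage.Available, usage.Total)
    }
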
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/open_nix.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/open_nix.go
new file mode 100644
index 0000000000..18d9f618a0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/open_nix.go
@@ -0,0 +1,22 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !windows
+
+package filesystem
+
+import "os"
+
+// OpenShared reimplements the os.Open function for Windows because the default
+// implementation opens files without the FILE_SHARE_DELETE flag.
+// cf: https://github.com/golang/go/blob/release-branch.go1.11/src/syscall/syscall_windows.go#L271
+// Without FILE_SHARE_DELETE, other users cannot rename/remove the file while
+// this handle is open. Adding this flag allows the agent to have the file open,
+// while not preventing it from being rotated/deleted.
+//
+// On non-Windows platforms, this calls through to os.Open directly.
+func OpenShared(path string) (*os.File, error) {
+ return os.Open(path)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/open_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/open_windows.go
new file mode 100644
index 0000000000..a4ad9ccfc7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/open_windows.go
@@ -0,0 +1,42 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+package filesystem
+
+import (
+ "os"
+
+ "golang.org/x/sys/windows"
+)
+
+// OpenShared reimplements the os.Open function for Windows because the default
+// implementation opens files without the FILE_SHARE_DELETE flag.
+// cf: https://github.com/golang/go/blob/release-branch.go1.11/src/syscall/syscall_windows.go#L271
+// Without FILE_SHARE_DELETE, other users cannot rename/remove the file while
+// this handle is open. Adding this flag allows the agent to have the file open,
+// while not preventing it from being rotated/deleted.
+//
+// On non-Windows platforms, this calls through to os.Open directly.
+func OpenShared(path string) (*os.File, error) {
+ pathp, err := windows.UTF16PtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+
+ access := uint32(windows.GENERIC_READ)
+ // add FILE_SHARE_DELETE that is missing from os.Open implementation
+ sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE | windows.FILE_SHARE_DELETE)
+ createmode := uint32(windows.OPEN_EXISTING)
+ var sa *windows.SecurityAttributes
+
+ r, err := windows.CreateFile(pathp, access, sharemode, sa, createmode, windows.FILE_ATTRIBUTE_NORMAL, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ return os.NewFile(uintptr(r), path), nil
+}
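
The point of OpenShared is that a file held open for reading (for example a log being
tailed) can still be rotated or deleted out from under the reader on Windows, while
behaving exactly like os.Open elsewhere. A minimal sketch, with an illustrative log
path and the canonical github.com import path assumed:

    package main

    import (
        "bufio"
        "fmt"
        "log"

        "github.com/DataDog/datadog-agent/pkg/util/filesystem"
    )

    func main() {
        // On Windows, a plain os.Open here would block rotation/deletion of the
        // file for as long as the handle stays open; OpenShared does not.
        f, err := filesystem.OpenShared("app.log")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        scanner := bufio.NewScanner(f)
        for scanner.Scan() {
            fmt.Println(scanner.Text())
        }
        if err := scanner.Err(); err != nil {
            log.Fatal(err)
        }
    }
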
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/permission_nowindows.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/permission_nowindows.go
new file mode 100644
index 0000000000..fcc2f37fc1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/permission_nowindows.go
@@ -0,0 +1,72 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !windows
+
+package filesystem
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "os/user"
+ "strconv"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// Permission handles permissions for Unix and Windows
+type Permission struct{}
+
+// NewPermission creates a new instance of `Permission`
+func NewPermission() (*Permission, error) {
+ return &Permission{}, nil
+}
+
+// RestrictAccessToUser sets the file's user and group to those of the 'dd-agent' user. If looking up the
+// 'dd-agent' user fails, it returns nil immediately.
+func (p *Permission) RestrictAccessToUser(path string) error {
+ usr, err := user.Lookup("dd-agent")
+ if err != nil {
+ return nil
+ }
+
+ usrID, err := strconv.Atoi(usr.Uid)
+ if err != nil {
+ return fmt.Errorf("couldn't parse UID (%s): %w", usr.Uid, err)
+ }
+
+ grpID, err := strconv.Atoi(usr.Gid)
+ if err != nil {
+ return fmt.Errorf("couldn't parse GID (%s): %w", usr.Gid, err)
+ }
+
+ if err = os.Chown(path, usrID, grpID); err != nil {
+ if errors.Is(err, fs.ErrPermission) {
+ log.Infof("Cannot change owner of '%s', permission denied", path)
+ return nil
+ }
+
+ return fmt.Errorf("couldn't set user and group owner for %s: %w", path, err)
+ }
+
+ return nil
+}
+
+// RemoveAccessToOtherUsers, on Unix, calls RestrictAccessToUser and then removes all access to the file for 'group'
+// and 'other'.
+func (p *Permission) RemoveAccessToOtherUsers(path string) error {
+ // We first try to set other and group to "dd-agent" when possible
+ _ = p.RestrictAccessToUser(path)
+
+ fperm, err := os.Stat(path)
+ if err != nil {
+ return err
+ }
+ // We keep the original 'user' rights but set 'group' and 'other' to zero.
+ newPerm := fperm.Mode().Perm() & 0700
+ return os.Chmod(path, fs.FileMode(newPerm))
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/permission_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/permission_windows.go
new file mode 100644
index 0000000000..2ae2929ce1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/filesystem/permission_windows.go
@@ -0,0 +1,77 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+//go:build windows
+
+package filesystem
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/hectane/go-acl"
+ "golang.org/x/sys/windows"
+)
+
+// Permission handles permissions for Unix and Windows
+type Permission struct {
+ currentUserSid *windows.SID
+ administratorSid *windows.SID
+ systemSid *windows.SID
+}
+
+// NewPermission creates a new instance of `Permission`
+func NewPermission() (*Permission, error) {
+ administratorSid, err := windows.StringToSid("S-1-5-32-544")
+ if err != nil {
+ return nil, err
+ }
+ systemSid, err := windows.StringToSid("S-1-5-18")
+ if err != nil {
+ return nil, err
+ }
+
+ currentUserSid, err := getCurrentUserSid()
+ if err != nil {
+ return nil, fmt.Errorf("Unable to get current user sid %v", err)
+ }
+ return &Permission{
+ currentUserSid: currentUserSid,
+ administratorSid: administratorSid,
+ systemSid: systemSid,
+ }, nil
+}
+
+func getCurrentUserSid() (*windows.SID, error) {
+ token, err := syscall.OpenCurrentProcessToken()
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't get process token %v", err)
+ }
+ defer token.Close()
+ user, err := token.GetTokenUser()
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't get token user %v", err)
+ }
+ sidString, err := user.User.Sid.String()
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't get user sid string %v", err)
+ }
+ return windows.StringToSid(sidString)
+}
+
+// RestrictAccessToUser updates the ACL of a file so that only the current user, SYSTEM, and Administrators can access it
+func (p *Permission) RestrictAccessToUser(path string) error {
+ return acl.Apply(
+ path,
+ true, // replace the file permissions
+ false, // don't inherit
+ acl.GrantSid(windows.GENERIC_ALL, p.administratorSid),
+ acl.GrantSid(windows.GENERIC_ALL, p.systemSid),
+ acl.GrantSid(windows.GENERIC_ALL, p.currentUserSid))
+}
+
+// RemoveAccessToOtherUsers on Windows simply calls RestrictAccessToUser
+func (p *Permission) RemoveAccessToOtherUsers(path string) error {
+ return p.RestrictAccessToUser(path)
+}
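
Both implementations expose the same constructor and methods, so callers can lock down
a sensitive file without platform switches: on Unix the file is chowned to dd-agent
when possible and group/other bits are stripped, on Windows the ACL is rewritten to the
current user plus SYSTEM and Administrators. A hedged sketch; the file name is
illustrative and the canonical github.com import path is assumed:

    package main

    import (
        "log"

        "github.com/DataDog/datadog-agent/pkg/util/filesystem"
    )

    func main() {
        perm, err := filesystem.NewPermission()
        if err != nil {
            log.Fatal(err)
        }

        // Keep owner-only access to an illustrative secret file.
        if err := perm.RemoveAccessToOtherUsers("auth_token"); err != nil {
            log.Fatal(err)
        }
    }
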
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/args.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/args.go
new file mode 100644
index 0000000000..6c7c5fcca3
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/args.go
@@ -0,0 +1,80 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "reflect"
+
+ "go.uber.org/fx"
+)
+
+var errorInterface = reflect.TypeOf((*error)(nil)).Elem()
+
+// delayedFxInvocation delays execution of a function, while allowing
+// Fx to provide its arguments.
+type delayedFxInvocation struct {
+ fn interface{}
+ ftype reflect.Type
+ args []reflect.Value
+}
+
+// newDelayedFxInvocation creates a new delayedFxInvocation wrapping the given
+// function.
+//
+// The given function can have any number of arguments that will be supplied with Fx.
+// It must return nothing or an error.
+func newDelayedFxInvocation(fn interface{}) *delayedFxInvocation {
+ ftype := reflect.TypeOf(fn)
+ if ftype == nil || ftype.Kind() != reflect.Func {
+ panic("delayedFxInvocation requires a function as its first argument")
+ }
+
+ // verify it returns error
+ if ftype.NumOut() > 1 || (ftype.NumOut() == 1 && !ftype.Out(0).Implements(errorInterface)) {
+ panic("delayedFxInvocation function must return error or nothing")
+ }
+
+ return &delayedFxInvocation{fn: fn, ftype: ftype}
+}
+
+// option generates the fx.Option value to include in an fx.App that will
+// provide the argument values.
+func (i *delayedFxInvocation) option() fx.Option {
+ // build a function with the same signature as i.fn that will
+ // capture the args and do nothing.
+ captureArgs := reflect.MakeFunc(
+ i.ftype,
+ func(args []reflect.Value) []reflect.Value {
+ i.args = args
+ // return nothing or a single nil value of type error
+ if i.ftype.NumOut() == 0 {
+ return []reflect.Value{}
+ }
+ return []reflect.Value{reflect.Zero(errorInterface)}
+ })
+
+ // fx.Invoke that function to capture the args at startup
+ return fx.Invoke(captureArgs.Interface())
+}
+
+// call calls the underlying function. The fx.App must have already supplied
+// the arguments at this time. If the delayed function has no return value, then
+// this will always return nil.
+func (i *delayedFxInvocation) call() error {
+ if i.args == nil {
+ panic("delayedFxInvocation args have not yet been provided")
+ }
+
+ // call the original function with the args captured during app startup
+ res := reflect.ValueOf(i.fn).Call(i.args)
+
+ // and return an error if the function returned any non-nil value
+ if len(res) > 0 && !res[0].IsNil() {
+ err := res[0].Interface().(error)
+ return err
+ }
+ return nil
+}
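+
+// Illustrative sketch (editor's addition, hypothetical names): how the pieces
+// of delayedFxInvocation fit together inside this package; `runDeps` and
+// `otherOptions` are placeholders.
+//
+//    delayed := newDelayedFxInvocation(func(deps runDeps) error { return deps.Run() })
+//    app := fx.New(delayed.option(), otherOptions)
+//    if err := app.Start(context.Background()); err != nil {
+//        return err
+//    }
+//    return delayed.call() // invokes the wrapped function with the captured args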
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/createcomponent.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/createcomponent.go
new file mode 100644
index 0000000000..fed34b2f92
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/createcomponent.go
@@ -0,0 +1,94 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "go.uber.org/fx"
+)
+
+// Module is an fx.Module for a Component with an exported field "Options" to list options
+type Module struct {
+ fx.Option
+ Options []fx.Option
+}
+
+// Component is a simple wrapper around fx.Module that automatically determines
+// the component name.
+func Component(opts ...fx.Option) Module {
+ return Module{
+ Option: fx.Module(getComponentName(), opts...),
+ Options: opts,
+ }
+}
+
+// getComponentName gets the component name of the caller's caller.
+//
+// This must be a package of the form
+// `github.com/DataDog/datadog-agent/comp/<bundle>/<component>` or
+// `github.com/DataDog/datadog-agent/comp/<bundle>/<component>/impl`
+func getComponentName() string {
+ _, filename, _, ok := runtime.Caller(2)
+ if !ok {
+ panic("cannot determine component name")
+ }
+ filename = filepath.ToSlash(filename)
+ components := strings.Split(filename, "/")
+
+ // needed for testing: tests in this folder should not fail when defining components outside of the "comp/" folder.
+ if len(components) >= 2 && components[len(components)-2] == "fxutil" {
+ return "fxutil"
+ }
+ // TODO: (components) Remove this check when all components have been migrated to the new file organisation.
+ if len(components) >= 4 && components[len(components)-4] == "comp" {
+ return fmt.Sprintf("comp/%s/%s", components[len(components)-3], components[len(components)-2])
+ }
+ if len(components) >= 5 && components[len(components)-5] == "comp" {
+ return fmt.Sprintf("comp/%s/%s", components[len(components)-4], components[len(components)-3])
+ }
+
+ panic("must be called from a component (comp///component.go)")
+}
+
+// BundleOptions is an fx.Module for a Bundle with an exported field "Options" to list options
+type BundleOptions struct {
+ fx.Option
+ Options []fx.Option
+}
+
+// Bundle is a simple wrapper around fx.Module that automatically determines
+// the bundle name.
+func Bundle(opts ...fx.Option) BundleOptions {
+ return BundleOptions{
+ Option: fx.Module(getBundleName(), opts...),
+ Options: opts,
+ }
+}
+
+// getBundleName gets the bundle name of the caller's caller.
+//
+// This must be a package of the form
+// `github.com/DataDog/datadog-agent/comp/<bundle>`.
+func getBundleName() string {
+ //FIXME: this will break when updating to a version of Go containing the following commit
+ // https://github.com/golang/go/commit/88cb17e1069bef854ead49c703262abdf93c9458
+ // ie. update to go 1.22.x
+ // Changing the value of skip to 3 should fix it.
+ _, filename, _, ok := runtime.Caller(2)
+ if !ok {
+ panic("cannot determine bundle name")
+ }
+ filename = filepath.ToSlash(filename)
+ components := strings.Split(filename, "/")
+ if len(components) >= 3 && components[len(components)-3] == "comp" {
+ return fmt.Sprintf("comp/%s", components[len(components)-2])
+ }
+ panic("must be called from a bundle (comp//bundle.go)")
+}
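+
+// Illustrative sketch (editor's addition, hypothetical package layout and
+// names): a component package typically wraps its providers with Component,
+// and its bundle aggregates the component modules with Bundle.
+//
+//    // in comp/<bundle>/<component>/component.go
+//    func Module() fxutil.Module {
+//        return fxutil.Component(fx.Provide(newComponent))
+//    }
+//
+//    // in comp/<bundle>/bundle.go
+//    func Bundle() fxutil.BundleOptions {
+//        return fxutil.Bundle(somecomponent.Module())
+//    }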
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/doc.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/doc.go
new file mode 100644
index 0000000000..b52a53f7e4
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/doc.go
@@ -0,0 +1,7 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package fxutil provides utilities for interacting with fx.
+package fxutil
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/errorunwrapper.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/errorunwrapper.go
new file mode 100644
index 0000000000..a64f84895d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/errorunwrapper.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "errors"
+ "reflect"
+ "regexp"
+)
+
+// UnwrapIfErrArgumentsFailed unwraps the error if it was returned by an Fx invoke method; otherwise it returns the error unchanged.
+func UnwrapIfErrArgumentsFailed(err error) error {
+ // This is a workaround until https://github.com/uber-go/fx/issues/988 is resolved.
+ if reflect.TypeOf(err).Name() == "errArgumentsFailed" {
+ re := regexp.MustCompile(`.*received non-nil error from function.*\(.*\): (.*)`)
+ matches := re.FindStringSubmatch(err.Error())
+ if len(matches) == 2 {
+ return errors.New(matches[1])
+ }
+ }
+ return err
+}
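+
+// Illustrative example (editor's addition): assuming the error's dynamic type
+// is fx's errArgumentsFailed and its message reads
+//
+//    "received non-nil error from function \"main\".run (main.go:42): port already in use"
+//
+// UnwrapIfErrArgumentsFailed returns errors.New("port already in use"), so the
+// caller sees only the error produced by the invoked function.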
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/group.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/group.go
new file mode 100644
index 0000000000..829899a099
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/group.go
@@ -0,0 +1,34 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "reflect"
+ "slices"
+)
+
+// GetAndFilterGroup filters 'zero' values from an FX group.
+//
+// A 'zero' value, nil in most cases, can be injected into a group when a component declares that it returns an
+// element for that group but does not actually create the element. This is a common pattern with components that
+// can be disabled or partially enabled.
+//
+// This should be called in every component's constructor that requires an FX group as a dependency.
+func GetAndFilterGroup[S ~[]E, E any](group S) S {
+ return slices.DeleteFunc(group, func(item E) bool {
+ // if item is an untyped nil, aka interface{}(nil), we filter it directly
+ t := reflect.TypeOf(item)
+ if t == nil {
+ return true
+ }
+
+ switch t.Kind() {
+ case reflect.Pointer, reflect.Map, reflect.Array, reflect.Chan, reflect.Slice, reflect.Func, reflect.Interface:
+ return reflect.ValueOf(item).IsNil()
+ }
+ return false
+ })
+}
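+
+// Illustrative sketch (editor's addition, hypothetical types): a constructor
+// consuming an fx value group and dropping the nil entries contributed by
+// disabled components.
+//
+//    type dependencies struct {
+//        fx.In
+//        Providers []Provider `group:"providers"`
+//    }
+//
+//    func newRegistry(deps dependencies) Registry {
+//        return Registry{providers: GetAndFilterGroup(deps.Providers)}
+//    }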
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/logging.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/logging.go
new file mode 100644
index 0000000000..0b17166529
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/logging.go
@@ -0,0 +1,26 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "os"
+
+ "go.uber.org/fx"
+ "go.uber.org/fx/fxevent"
+)
+
+// FxLoggingOption creates an fx.Option to configure the Fx logger, either to do nothing
+// (the default) or to log to the console (when TRACE_FX is set).
+func FxLoggingOption() fx.Option {
+ return fx.WithLogger(
+ func() fxevent.Logger {
+ if os.Getenv("TRACE_FX") == "" {
+ return fxevent.NopLogger
+ }
+ return &fxevent.ConsoleLogger{W: os.Stderr}
+ },
+ )
+}
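+
+// Illustrative usage (editor's addition): with TRACE_FX set in the
+// environment, fx prints its provide/invoke/lifecycle events to stderr;
+// `otherOptions` is a placeholder.
+//
+//    // TRACE_FX=1 in the environment enables console logging of fx events
+//    app := fx.New(FxLoggingOption(), otherOptions)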
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/oneshot.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/oneshot.go
new file mode 100644
index 0000000000..e4eb049158
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/oneshot.go
@@ -0,0 +1,72 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "context"
+ "errors"
+
+ "go.uber.org/fx"
+)
+
+// OneShot runs the given function in an fx.App using the supplied options.
+// The function's arguments are supplied by Fx and can be any provided type.
+// The function must return `error` or nothing.
+//
+// The resulting app starts all components, then invokes the function, then
+// immediately shuts down. This is typically used for command-line tools like
+// `agent status`.
+func OneShot(oneShotFunc interface{}, opts ...fx.Option) error {
+ if fxAppTestOverride != nil {
+ return fxAppTestOverride(oneShotFunc, opts)
+ }
+
+ // Use a delayed Fx invocation to capture arguments for oneShotFunc during
+ // application setup, but not actually invoke the function until all
+ // lifecycle start hooks have completed. Lifecycle start hooks are partially
+ // ordered by dependencies, but there is no way to guarantee "run this
+ // function last".
+ delayedCall := newDelayedFxInvocation(oneShotFunc)
+
+ opts = append(opts,
+ delayedCall.option(),
+ FxLoggingOption(),
+ fx.Provide(newFxLifecycleAdapter),
+ )
+ // Temporarily increase timeout for all fxutil.OneShot calls until we can better characterize our
+ // start time requirements. Prepend to opts so individual calls can override the timeout.
+ opts = append(
+ []fx.Option{TemporaryAppTimeouts()},
+ opts...,
+ )
+ app := fx.New(opts...)
+
+ // start the app
+ startCtx, cancel := context.WithTimeout(context.Background(), app.StartTimeout())
+ defer cancel()
+ if err := app.Start(startCtx); err != nil {
+ return errors.Join(UnwrapIfErrArgumentsFailed(err), stopApp(app))
+ }
+
+ // call the original oneShotFunc with the args captured during app startup
+ err := delayedCall.call()
+ if err != nil {
+ return errors.Join(err, stopApp(app))
+ }
+
+ return stopApp(app)
+}
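+
+// Illustrative sketch (editor's addition, hypothetical names): wiring OneShot
+// into a cobra command; `cliParams`, `runStatus`, and the supplied options are
+// placeholders.
+//
+//    cmd := &cobra.Command{
+//        Use: "status",
+//        RunE: func(*cobra.Command, []string) error {
+//            return fxutil.OneShot(runStatus,
+//                fx.Supply(cliParams),
+//                otherModulesAndParams,
+//            )
+//        },
+//    }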
+
+func stopApp(app *fx.App) error {
+ // stop the app
+ stopCtx, cancel := context.WithTimeout(context.Background(), app.StopTimeout())
+ defer cancel()
+ if err := app.Stop(stopCtx); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/provide_comp.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/provide_comp.go
new file mode 100644
index 0000000000..6c0737a77d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/provide_comp.go
@@ -0,0 +1,278 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ compdef "github.com/DataDog/datadog-agent/comp/def"
+ "go.uber.org/fx"
+)
+
+var (
+ compOutType = reflect.TypeOf((*compdef.Out)(nil)).Elem()
+ fxInType = reflect.TypeOf(fx.In{})
+ fxOutType = reflect.TypeOf(fx.Out{})
+)
+
+// ProvideComponentConstructor takes as input a Component constructor function
+// that uses plain (non-fx aware) structs as its argument and return value, and
+// returns an fx.Provide'd Option that will properly include that Component
+// into the fx constructor graph.
+//
+// For example, given:
+//
+// type Provides struct {
+// My MyComponent
+// }
+// type Requires struct {
+// Dep MyDependency
+// }
+// func NewComponent(reqs Requires) Provides { ... }
+//
+// then:
+//
+// ProvideComponentConstructor(NewComponent)
+//
+// will create these anonymous types:
+//
+// type FxAwareProvides struct {
+// fx.Out
+// My MyComponent
+// }
+// type FxAwareRequires struct {
+// fx.In
+// Dep MyDependency
+// }
+//
+// and then Provide those types into fx's dependency graph
+func ProvideComponentConstructor(compCtorFunc interface{}) fx.Option {
+ // type-check the input argument to the constructor
+ ctorFuncType := reflect.TypeOf(compCtorFunc)
+ if ctorFuncType.Kind() != reflect.Func || ctorFuncType.NumIn() > 1 || ctorFuncType.NumOut() == 0 || ctorFuncType.NumOut() > 2 {
+ return fx.Error(errors.New("argument must be a function with 0 or 1 arguments, and 1 or 2 return values"))
+ }
+ if ctorFuncType.NumIn() > 0 && ctorFuncType.In(0).Kind() != reflect.Struct {
+ return fx.Error(errors.New(`constructor must either take 0 arguments, or 1 "requires" struct`))
+ }
+ hasZeroArg := ctorFuncType.NumIn() == 0
+
+ inFxType, outFxType, hasErrRet, err := constructFxInAndOut(ctorFuncType)
+ if err != nil {
+ return fx.Error(err)
+ }
+
+ // build reflect.Type of the constructor function that will be provided to `fx.Provide`
+ funcFxType := reflect.FuncOf([]reflect.Type{inFxType}, []reflect.Type{outFxType}, false)
+ if hasErrRet {
+ funcFxType = reflect.FuncOf([]reflect.Type{inFxType}, []reflect.Type{outFxType, errorInterface}, false)
+ }
+
+ // wrapper that receives the fx-aware requirements, converts them into plain requirements,
+ // calls the plain constructor, and converts its result into the fx-aware provides struct
+ fxAwareProviderFunc := reflect.MakeFunc(funcFxType, func(args []reflect.Value) []reflect.Value {
+ // invoke the regular constructor with the correct arguments
+ var ctorArgs []reflect.Value
+ if !hasZeroArg {
+ ctorArgs = makeConstructorArgs(args[0])
+ }
+ plainOuts := reflect.ValueOf(compCtorFunc).Call(ctorArgs)
+ // create the return value: an fx-aware provides struct and an optional error
+ res := []reflect.Value{makeFxAwareProvides(plainOuts[0], outFxType)}
+ if hasErrRet {
+ res = append(res, plainOuts[1])
+ }
+ return res
+ })
+
+ return fx.Provide(fxAwareProviderFunc.Interface())
+}
+
+// get the element at the index if the index is within the limit
+func getWithinLimit[T any](index int, get func(int) T, limit func() int) T {
+ if index < limit() {
+ return get(index)
+ }
+ var zero T
+ return zero
+}
+
+// create a struct that represents the (possibly nil) input type
+func asStruct(typ reflect.Type) (reflect.Type, error) {
+ if typ == nil {
+ return reflect.StructOf([]reflect.StructField{}), nil
+ }
+ if typ.Kind() == reflect.Interface {
+ return reflect.StructOf([]reflect.StructField{{Name: typ.Name(), Type: typ}}), nil
+ }
+ if typ.Kind() == reflect.Struct {
+ return typ, nil
+ }
+ return nil, fmt.Errorf("unexpected argument: %T, must be struct or interface", typ)
+}
+
+// create a struct field for embedding the type as an anonymous field
+func toEmbedField(typ reflect.Type) reflect.StructField {
+ return reflect.StructField{Type: typ, Name: typ.Name(), Anonymous: true}
+}
+
+// returns true if the type is error, false if it is nil, and an error otherwise
+func ensureErrorOrNil(typ reflect.Type) (bool, error) {
+ if typ == nil {
+ return false, nil
+ }
+ if typ == reflect.TypeOf((*error)(nil)).Elem() {
+ return true, nil
+ }
+ return false, fmt.Errorf("second return value must be error, got %v", typ)
+}
+
+// return true if the struct type has an embed field of the given type
+func hasEmbedField(typ, embed reflect.Type) bool {
+ if typ.Kind() != reflect.Struct {
+ return false
+ }
+ for i := 0; i < typ.NumField(); i++ {
+ if typ.Field(i).Type == embed {
+ return true
+ }
+ }
+ return false
+}
+
+// construct fx-aware types for the input and output of the given constructor function
+func constructFxInAndOut(ctorFuncType reflect.Type) (reflect.Type, reflect.Type, bool, error) {
+ ctorInType, err1 := asStruct(getWithinLimit(0, ctorFuncType.In, ctorFuncType.NumIn))
+ ctorOutType, err2 := asStruct(ctorFuncType.Out(0))
+ hasErrRet, err3 := ensureErrorOrNil(getWithinLimit(1, ctorFuncType.Out, ctorFuncType.NumOut))
+ if err := errors.Join(err1, err2, err3); err != nil {
+ return nil, nil, false, err
+ }
+
+ // create types that have fx-aware embed-fields
+ // these are used to construct a function that can build the fx graph
+ inFxType, err := constructFxInType(ctorInType)
+ if err != nil {
+ return nil, nil, hasErrRet, err
+ }
+ outFxType, err := constructFxOutType(ctorOutType)
+ return inFxType, outFxType, hasErrRet, err
+}
+
+func constructFxInType(plainType reflect.Type) (reflect.Type, error) {
+ return constructFxAwareStruct(plainType, false)
+}
+
+func constructFxOutType(plainType reflect.Type) (reflect.Type, error) {
+ return constructFxAwareStruct(plainType, true)
+}
+
+// construct a new fx-aware struct type that matches the plainType, but has fx.In / fx.Out embedded
+func constructFxAwareStruct(plainType reflect.Type, isOut bool) (reflect.Type, error) {
+ var oldEmbed, newEmbed reflect.Type
+ if isOut {
+ oldEmbed = compOutType
+ newEmbed = fxOutType
+ } else {
+ newEmbed = fxInType
+ }
+ if plainType == nil {
+ return reflect.StructOf([]reflect.StructField{toEmbedField(newEmbed)}), nil
+ }
+ if plainType.Kind() == reflect.Interface {
+ field := reflect.StructField{Name: plainType.Name(), Type: plainType}
+ return reflect.StructOf([]reflect.StructField{toEmbedField(newEmbed), field}), nil
+ }
+ if plainType.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("bad type: %T", plainType)
+ }
+ return replaceStructEmbeds(plainType, oldEmbed, newEmbed, true), nil
+}
+
+// replaceStructEmbeds copies a struct type to a newly created struct type, removing
+// the oldEmbed fields and prepending the newEmbed field, if given. This is done
+// recursively for fields that themselves contain an embedding type
+func replaceStructEmbeds(typ, oldEmbed, newEmbed reflect.Type, assumeEmbed bool) reflect.Type {
+ hasEmbed := assumeEmbed || hasEmbedField(typ, oldEmbed)
+ if !hasEmbed {
+ return typ
+ }
+
+ newFields := make([]reflect.StructField, 0, typ.NumField())
+ for n := 0; n < typ.NumField(); n++ {
+ field := typ.Field(n)
+ if field.Type == oldEmbed {
+ continue
+ }
+ if field.Type.Kind() == reflect.Struct && oldEmbed != nil && newEmbed != nil && hasEmbed {
+ field = reflect.StructField{Name: field.Name, Type: replaceStructEmbeds(field.Type, oldEmbed, newEmbed, false)}
+ }
+ newFields = append(newFields, reflect.StructField{Name: field.Name, Type: field.Type})
+ }
+
+ if hasEmbed && newEmbed != nil {
+ newFields = append([]reflect.StructField{toEmbedField(newEmbed)}, newFields...)
+ }
+ return reflect.StructOf(newFields)
+}
+
+// create arguments that are ready to be passed to the plain constructor by
+// removing fx specific fields from the fx-aware requires struct
+func makeConstructorArgs(fxAwareReqs reflect.Value) []reflect.Value {
+ if fxAwareReqs.Kind() != reflect.Struct {
+ panic("pre-condition failure: must be called with Struct")
+ }
+ plainType := replaceStructEmbeds(fxAwareReqs.Type(), fxInType, nil, false)
+ return []reflect.Value{coerceStructTo(fxAwareReqs, plainType, fxOutType, nil)}
+}
+
+// change the return value from the plain constructor into an fx-aware provides struct
+func makeFxAwareProvides(plainSource reflect.Value, outFxType reflect.Type) reflect.Value {
+ if plainSource.Kind() == reflect.Interface {
+ // convert an interface into a struct that only contains it
+ fxAwareResult := reflect.New(outFxType).Elem()
+ fxAwareResult.Field(1).Set(plainSource)
+ return fxAwareResult
+ }
+ return coerceStructTo(plainSource, outFxType, compOutType, fxOutType)
+}
+
+// create a struct of the outType and copy fields-by-name from the input to it, replacing embeds recursively
+func coerceStructTo(input reflect.Value, outType reflect.Type, oldEmbed, newEmbed reflect.Type) reflect.Value {
+ result := reflect.New(outType).Elem()
+ for i := 0; i < result.NumField(); i++ {
+ target := result.Type().Field(i)
+ if target.Type == newEmbed {
+ continue
+ }
+ if v := input.FieldByName(target.Name); v.IsValid() {
+ if hasEmbedField(v.Type(), oldEmbed) {
+ v = coerceStructTo(v, replaceStructEmbeds(v.Type(), oldEmbed, newEmbed, true), oldEmbed, newEmbed)
+ }
+ result.FieldByName(target.Name).Set(v)
+ }
+ }
+ return result
+}
+
+var _ compdef.Lifecycle = (*fxLifecycleAdapter)(nil)
+
+type fxLifecycleAdapter struct {
+ lc fx.Lifecycle
+}
+
+func newFxLifecycleAdapter(lc fx.Lifecycle) compdef.Lifecycle {
+ return &fxLifecycleAdapter{lc: lc}
+}
+
+func (a *fxLifecycleAdapter) Append(h compdef.Hook) {
+ a.lc.Append(fx.Hook{
+ OnStart: h.OnStart,
+ OnStop: h.OnStop,
+ })
+}
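+
+// Illustrative sketch (editor's addition, hypothetical component): a plain,
+// fx-free constructor can depend on compdef.Lifecycle; when the app also
+// includes fx.Provide(newFxLifecycleAdapter), as OneShot and Run do, the
+// registered hooks end up on the fx.App lifecycle.
+//
+//    type Requires struct {
+//        Lc compdef.Lifecycle
+//    }
+//
+//    func NewServer(reqs Requires) Provides {
+//        s := &server{}
+//        reqs.Lc.Append(compdef.Hook{OnStart: s.start, OnStop: s.stop})
+//        return Provides{Comp: s}
+//    }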
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/run.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/run.go
new file mode 100644
index 0000000000..3fc9fed15c
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/run.go
@@ -0,0 +1,43 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "context"
+ "errors"
+
+ "go.uber.org/fx"
+)
+
+// Run runs an fx.App using the supplied options, returning any errors.
+//
+// This differs from fx.App#Run in that it returns errors instead of exiting
+// the process.
+func Run(opts ...fx.Option) error {
+ if fxAppTestOverride != nil {
+ return fxAppTestOverride(func() {}, opts)
+ }
+
+ opts = append(opts, FxLoggingOption(), fx.Provide(newFxLifecycleAdapter))
+ // Temporarily increase timeout for all fxutil.Run calls until we can better characterize our
+ // start time requirements. Prepend to opts so individual calls can override the timeout.
+ opts = append(
+ []fx.Option{TemporaryAppTimeouts()},
+ opts...,
+ )
+ app := fx.New(opts...)
+
+ startCtx, cancel := context.WithTimeout(context.Background(), app.StartTimeout())
+ defer cancel()
+
+ if err := app.Start(startCtx); err != nil {
+ return errors.Join(UnwrapIfErrArgumentsFailed(err), stopApp(app))
+ }
+
+ <-app.Done()
+
+ return stopApp(app)
+}
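+
+// Illustrative sketch (editor's addition, hypothetical names): a long-running
+// command built on Run. The app stays up until fx receives a shutdown signal
+// or a component calls fx.Shutdowner.
+//
+//    err := fxutil.Run(
+//        fx.Supply(cliParams),
+//        fx.Provide(newAgentComponents),
+//        fx.Invoke(startAgent),
+//    )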
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/test.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/test.go
new file mode 100644
index 0000000000..ac34e31bb5
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/test.go
@@ -0,0 +1,308 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "context"
+ "reflect"
+ "testing"
+
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/fx"
+ "go.uber.org/fx/fxtest"
+)
+
+// NoDependencies defines a component which doesn't have any dependencies
+type NoDependencies struct {
+ fx.In
+}
+
+// fxAppTestOverride allows TestRunCommand and TestOneShotSubcommand to
+// override the Run and OneShot functions. It is always nil in production.
+var fxAppTestOverride func(interface{}, []fx.Option) error
+
+// Test starts an app and returns fulfilled dependencies
+//
+// The generic return type T must conform to fx.In such
+// that its dependencies can be fulfilled.
+func Test[T any](t testing.TB, opts ...fx.Option) T {
+ var deps T
+ delayed := newDelayedFxInvocation(func(d T) {
+ deps = d
+ })
+
+ app := fxtest.New(
+ t,
+ fx.Provide(newFxLifecycleAdapter),
+ fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))),
+ delayed.option(),
+ fx.Options(opts...),
+ )
+ app.RequireStart()
+
+ t.Cleanup(func() {
+ app.RequireStop()
+ })
+
+ if err := delayed.call(); err != nil {
+ t.Fatal(err.Error())
+ }
+
+ return deps
+}
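+
+// Illustrative sketch (editor's addition, hypothetical component): a unit test
+// letting fx construct the component under test together with its mocked
+// dependencies; `foo.Component` and `foo.MockModule` are placeholders.
+//
+//    type deps struct {
+//        fx.In
+//        Comp foo.Component
+//    }
+//
+//    func TestFoo(t *testing.T) {
+//        d := fxutil.Test[deps](t, foo.MockModule())
+//        require.NotNil(t, d.Comp)
+//    }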
+
+// TestApp starts an fx app and returns fulfilled dependencies
+//
+// The generic return type T must conform to fx.In such
+// that its dependencies can be fulfilled.
+func TestApp[T any](opts ...fx.Option) (*fx.App, T, error) {
+ var deps T
+ delayed := newDelayedFxInvocation(func(d T) {
+ deps = d
+ })
+
+ app := fx.New(
+ delayed.option(),
+ fx.Options(opts...),
+ )
+ var err error
+ if err = app.Start(context.TODO()); err != nil {
+ return nil, deps, err
+ }
+
+ err = delayed.call()
+
+ return app, deps, err
+}
+
+type appAssertFn func(testing.TB, *fx.App)
+
+// TestStart runs an fx.App.
+//
+// This function does *not* leverage fxtest.App because we want to be
+// able to test for App initialization errors and expected failures.
+//
+// The appAssert hook receives the provided testing.TB and the constructed
+// (but not yet started) app; it is responsible for starting the app, if
+// desired, and for its own assertions, so the test does not automatically
+// fail if the application fails to start. Within the app, `t` is provided as
+// type `testing.TB`.
+//
+// The supplied `fn` function is never called, but is required so that its
+// argument types are registered as dependencies of the app.
+//
+// Use `fx.Options(..)` to bundle multiple fx.Option values into one.
+func TestStart(t testing.TB, opts fx.Option, appAssert appAssertFn, fn interface{}) {
+ delayed := newDelayedFxInvocation(fn)
+ app := fx.New(
+ fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))),
+ delayed.option(),
+ opts,
+ )
+
+ appAssert(t, app)
+}
+
+// TestRun is a helper for testing code that uses fxutil.Run
+//
+// It takes an anonymous function and sets up fx so that no actual App
+// will be constructed. Instead, it expects the given function to call
+// fxutil.Run. Then, this test verifies that all Options given to that
+// fxutil.Run call satisfy fx's dependencies by using fx.ValidateApp.
+func TestRun(t *testing.T, f func() error) {
+ var fxFakeAppRan bool
+ fxAppTestOverride = func(i interface{}, opts []fx.Option) error {
+ fxFakeAppRan = true
+ require.NoError(t, fx.ValidateApp(opts...))
+ return nil
+ }
+ defer func() { fxAppTestOverride = nil }()
+ require.NoError(t, f())
+ require.True(t, fxFakeAppRan, "fxutil.Run wasn't called")
+}
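+
+// Illustrative sketch (editor's addition): validating a command that calls
+// fxutil.Run without actually starting an app; `runAgentCommand` is a
+// hypothetical entry point.
+//
+//    func TestRunCommand(t *testing.T) {
+//        fxutil.TestRun(t, func() error {
+//            return runAgentCommand([]string{"run"})
+//        })
+//    }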
+
+// TestOneShotSubcommand is a helper for testing commands implemented with fxutil.OneShot.
+//
+// It takes an array of commands, and attaches all to a temporary top-level
+// command, then executes the given command line (beginning with the
+// subcommand name) against that top-level command.
+//
+// The execution should eventually call fxutil.OneShot with the oneShotFunc
+// given by expectedOneShotFunc. However, this function will not actually be
+// called, as that would lead to the one-shot command actually running. It
+// is validated with fx.ValidateApp, however.
+//
+// The `fx.Option`s passed to fxutil.OneShot are used to create a new app
+// containing only the final argument to this function. Be careful not to
+// require any components, since nothing is mocked here. Typically, the
+// function only requires static values such as `BundleParams` or `cliParams`
+// and asserts they contain appropriate values.
+func TestOneShotSubcommand(
+ t *testing.T,
+ subcommands []*cobra.Command,
+ commandline []string,
+ expectedOneShotFunc interface{},
+ verifyFn interface{},
+) {
+ var oneShotRan bool
+ fxAppTestOverride = func(oneShotFunc interface{}, opts []fx.Option) error {
+ oneShotRan = true
+
+ // verify that the expected oneShotFunc would have been called
+ require.Equal(t,
+ reflect.ValueOf(expectedOneShotFunc).Pointer(),
+ reflect.ValueOf(oneShotFunc).Pointer(),
+ "got a different oneShotFunc than expected")
+
+ // validate the app with the original oneShotFunc, to ensure that
+ // any types it requires are provided.
+ require.NoError(t,
+ fx.ValidateApp(
+ append(opts,
+ fx.Invoke(oneShotFunc))...))
+
+ // build an app without the oneShotFunc, and with verifyFn
+ app := fxtest.New(t,
+ append(opts,
+ fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))),
+ fx.Invoke(verifyFn))...)
+ defer app.RequireStart().RequireStop()
+ return nil
+ }
+ defer func() { fxAppTestOverride = nil }()
+
+ cmd := &cobra.Command{Use: "test"}
+ for _, c := range subcommands {
+ cmd.AddCommand(c)
+ }
+ cmd.SetArgs(append([]string{}, commandline...))
+
+ require.NoError(t, cmd.Execute())
+ require.True(t, oneShotRan, "fxutil.OneShot wasn't called")
+}
+
+// TestOneShot is a helper for testing that there are no missing dependencies when calling
+// fxutil.OneShot.
+//
+// The function passed as the first argument of fxutil.OneShot is not called. It
+// is validated with fx.ValidateApp, however.
+func TestOneShot(t *testing.T, fct func()) {
+ var oneShotRan bool
+ fxAppTestOverride = func(oneShotFunc interface{}, opts []fx.Option) error {
+ oneShotRan = true
+ // validate the app with the original oneShotFunc, to ensure that
+ // any types it requires are provided.
+ require.NoError(t,
+ fx.ValidateApp(
+ append(opts,
+ fx.Invoke(oneShotFunc))...))
+ return nil
+ }
+ defer func() { fxAppTestOverride = nil }()
+
+ fct()
+ require.True(t, oneShotRan, "fxutil.OneShot wasn't called")
+}
+
+// TestBundle is a helper to test a Bundle.
+//
+// This function checks that all components built with fx.Provide inside a bundle can be instantiated.
+// To do so, it creates an `fx.Invoke(_ component1, _ component2, ...)` and calls fx.ValidateApp.
+func TestBundle(t *testing.T, bundle BundleOptions, extraOptions ...fx.Option) {
+ var componentTypes []reflect.Type
+
+ for _, option := range bundle.Options {
+ module, ok := option.(Module)
+ if ok {
+ t.Logf("Discovering components for %v", module)
+ for _, moduleOpt := range module.Options {
+ componentTypes = appendModuleComponentTypes(t, componentTypes, moduleOpt)
+ }
+ }
+ }
+ invoke := createFxInvokeOption(componentTypes)
+
+ t.Logf("Check the following components are instanciable: %v", componentTypes)
+ require.NoError(t, fx.ValidateApp(
+ invoke,
+ bundle,
+ fx.Options(extraOptions...),
+ fx.Supply(fx.Annotate(t, fx.As(new(testing.TB)))),
+ ))
+}
+
+// appendModuleComponentTypes appends the components inside provideOption to componentTypes
+func appendModuleComponentTypes(t *testing.T, componentTypes []reflect.Type, provideOption fx.Option) []reflect.Type {
+ moduleValue := reflect.ValueOf(provideOption)
+ // provideOption has a `Targets` field of factories: https://github.com/uber-go/fx/blob/master/provide.go#L65-L68
+ targets := moduleValue.FieldByName("Targets")
+ if targets.IsValid() {
+ targetValues := targets.Interface().([]interface{})
+ for _, target := range targetValues {
+ targetType := reflect.TypeOf(target)
+ if targetType.Kind() == reflect.Func && targetType.NumOut() > 0 {
+ // As the first returned type is the component it is enough to consider
+ // only the first type
+ returnType := targetType.Out(0)
+ types := getComponents(t, returnType)
+ componentTypes = append(componentTypes, types...)
+ }
+ }
+ }
+ return componentTypes
+}
+
+// getComponents returns the component types contained in mainType.
+func getComponents(t *testing.T, mainType reflect.Type) []reflect.Type {
+ if isFxOutType(mainType) {
+ var types []reflect.Type
+ for i := 0; i < mainType.NumField(); i++ {
+ field := mainType.Field(i)
+ fieldType := field.Type
+
+ // Ignore fx groups because returning an instance of
+ // type Provider struct {
+ // fx.Out
+ // Provider MyProvider `group:"myGroup"`
+ // }
+ // doesn't satisfy fx.Invoke(_ MyProvider)
+ if fieldType != fxOutType && field.Tag.Get("group") == "" {
+ types = append(types, getComponents(t, fieldType)...)
+ }
+ }
+ return types
+ }
+
+ if mainType.Kind() == reflect.Interface || mainType.Kind() == reflect.Struct {
+ t.Logf("\tFound: %v", mainType)
+ return []reflect.Type{mainType}
+ }
+ return nil
+}
+
+func isFxOutType(t reflect.Type) bool {
+ if t.Kind() == reflect.Struct {
+ for i := 0; i < t.NumField(); i++ {
+ fieldType := t.Field(i).Type
+ if fieldType == fxOutType {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// createFxInvokeOption creates fx.Invoke(_ componentTypes[0], _ componentTypes[1], ...)
+func createFxInvokeOption(componentTypes []reflect.Type) fx.Option {
+ fctSig := reflect.FuncOf(componentTypes, nil, false)
+ captureArgs := reflect.MakeFunc(
+ fctSig,
+ func(args []reflect.Value) []reflect.Value {
+ return []reflect.Value{}
+ })
+
+ return fx.Invoke(captureArgs.Interface())
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/timeout.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/timeout.go
new file mode 100644
index 0000000000..0b66392881
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/fxutil/timeout.go
@@ -0,0 +1,55 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package fxutil
+
+import (
+ "os"
+ "strconv"
+ "time"
+
+ "go.uber.org/fx"
+)
+
+const (
+ envFxStartTimeoutOverride = "DD_FX_START_TIMEOUT_SECONDS"
+ envFxStopTimeoutOverride = "DD_FX_STOP_TIMEOUT_SECONDS"
+ defaultFxTimeout = 5 * time.Minute
+)
+
+// TemporaryAppTimeouts returns new fx Start/Stop timeout options, defaulting to 5 minutes.
+//
+// The start timeout can be overridden with the DD_FX_START_TIMEOUT_SECONDS environment variable.
+// The stop timeout can be overridden with the DD_FX_STOP_TIMEOUT_SECONDS environment variable.
+//
+// Before fx, the Agent did not have any start/stop timeouts; it would hang indefinitely. As we
+// have been adding more fx.Hooks, we began hitting flaky tests with expired fx timeouts.
+// We use a large timeout value by default to minimize the chance that customers will be impacted by the timeout.
+// However, note that most platforms' service managers send SIGKILL after a timeout:
+// - upstart default is 5 seconds
+// - see pkg/util/winutil/servicemain/servicemain.go:Service.HardStopTimeout
+//
+// We can revisit this once we can better characterize the agent start/stop behavior and be intentional
+// about timeout values.
+func TemporaryAppTimeouts() fx.Option {
+ return fx.Options(
+ fx.StartTimeout(timeoutFromEnv(envFxStartTimeoutOverride)),
+ fx.StopTimeout(timeoutFromEnv(envFxStopTimeoutOverride)),
+ )
+}
+
+// timeoutFromEnv reads the environment variable named @envVariable and returns a go duration for that many seconds.
+// Returns defaultFxTimeout (5 minutes) if the environment variable does not exist or is not an integer.
+func timeoutFromEnv(envVariable string) time.Duration {
+ timeString, found := os.LookupEnv(envVariable)
+ if !found {
+ return defaultFxTimeout
+ }
+ timeValue, err := strconv.Atoi(timeString)
+ if err != nil {
+ return defaultFxTimeout
+ }
+ return time.Duration(timeValue) * time.Second
+}
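+
+// Illustrative usage (editor's addition): overriding the start timeout through
+// the environment; an unset or non-integer value falls back to the 5 minute
+// default.
+//
+//    os.Setenv("DD_FX_START_TIMEOUT_SECONDS", "600")
+//    opts := TemporaryAppTimeouts() // fx.StartTimeout is now 10 minutes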
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/hostname/validate/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/hostname/validate/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/hostname/validate/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/hostname/validate/normalize.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/hostname/validate/normalize.go
new file mode 100644
index 0000000000..5ed6240ea7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/hostname/validate/normalize.go
@@ -0,0 +1,59 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package validate
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+)
+
+var (
+ // Filter to clean the directory name from invalid file name characters
+ directoryNameFilter = regexp.MustCompile(`[^a-zA-Z0-9_-]+`)
+)
+
+const (
+ // Maximum size for a directory name
+ directoryToHostnameMaxSize = 32
+)
+
+// NormalizeHost applies a liberal policy on host names.
+func NormalizeHost(host string) (string, error) {
+ var buf bytes.Buffer
+
+ // hosts longer than 253 characters are illegal
+ if len(host) > 253 {
+ return "", fmt.Errorf("hostname is too long, should contain less than 253 characters")
+ }
+
+ for _, r := range host {
+ switch r {
+ // if there's a null rune, toss the whole thing
+ case '\x00':
+ return "", fmt.Errorf("hostname cannot contain null character")
+ // drop these characters entirely
+ case '\n', '\r', '\t':
+ continue
+ // replace characters that are generally used for xss with '-'
+ case '>', '<':
+ buf.WriteByte('-')
+ default:
+ buf.WriteRune(r)
+ }
+ }
+
+ return buf.String(), nil
+}
+
+// CleanHostnameDir returns a hostname normalized to be used as a directory name.
+func CleanHostnameDir(hostname string) string {
+ hostname = directoryNameFilter.ReplaceAllString(hostname, "_")
+ if len(hostname) > directoryToHostnameMaxSize {
+ return hostname[:directoryToHostnameMaxSize]
+ }
+ return hostname
+}
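+
+// Illustrative examples (editor's addition) of the two helpers:
+//
+//    NormalizeHost("web-01\n.example.com")  // "web-01.example.com", nil
+//    NormalizeHost("a<b>c")                 // "a-b-c", nil
+//    CleanHostnameDir("web 01.example.com") // "web_01_example_com"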
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/hostname/validate/validate.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/hostname/validate/validate.go
new file mode 100644
index 0000000000..1e52cdfd12
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/hostname/validate/validate.go
@@ -0,0 +1,55 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package validate provides hostname validation helpers
+package validate
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+const maxLength = 255
+
+var (
+ validHostnameRfc1123 = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`)
+ localhostIdentifiers = []string{
+ "localhost",
+ "localhost.localdomain",
+ "localhost6.localdomain6",
+ "ip6-localhost",
+ }
+)
+
+// ValidHostname determines whether the passed string is a valid hostname.
+// In case it's not, the returned error contains the details of the failure.
+func ValidHostname(hostname string) error {
+ if hostname == "" {
+ return fmt.Errorf("hostname is empty")
+ } else if isLocal(hostname) {
+ return fmt.Errorf("%s is a local hostname", hostname)
+ } else if len(hostname) > maxLength {
+ log.Errorf("ValidHostname: name exceeded the maximum length of %d characters", maxLength)
+ return fmt.Errorf("name exceeded the maximum length of %d characters", maxLength)
+ } else if !validHostnameRfc1123.MatchString(hostname) {
+ log.Errorf("ValidHostname: %s is not RFC1123 compliant", hostname)
+ return fmt.Errorf("%s is not RFC1123 compliant", hostname)
+ }
+ return nil
+}
+
+// check whether the name is in the list of local hostnames
+func isLocal(name string) bool {
+ name = strings.ToLower(name)
+ for _, val := range localhostIdentifiers {
+ if val == name {
+ return true
+ }
+ }
+ return false
+}
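+
+// Illustrative examples (editor's addition):
+//
+//    ValidHostname("web-01.example.com") // nil
+//    ValidHostname("localhost")          // error: localhost is a local hostname
+//    ValidHostname("bad_host")           // error: bad_host is not RFC1123 compliant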
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/http/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/http/client.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/client.go
new file mode 100644
index 0000000000..f6e115bdae
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/client.go
@@ -0,0 +1,71 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package http provides utilities to perform HTTP requests.
+package http
+
+import (
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// ResetClient wraps (http.Client).Do and resets the underlying connections at the
+// configured interval
+type ResetClient struct {
+ httpClientFactory func() *http.Client
+ resetInterval time.Duration
+
+ mu sync.RWMutex
+ httpClient *http.Client
+ lastReset time.Time
+}
+
+// NewResetClient returns an initialized ResetClient that resets its connections at the passed
+// resetInterval (0 means that no reset is performed).
+// The underlying http.Client is created using the passed HTTP client factory.
+func NewResetClient(resetInterval time.Duration, httpClientFactory func() *http.Client) *ResetClient {
+ return &ResetClient{
+ httpClientFactory: httpClientFactory,
+ resetInterval: resetInterval,
+ httpClient: httpClientFactory(),
+ lastReset: time.Now(),
+ }
+}
+
+// Do wraps (http.Client).Do. Thread safe.
+func (c *ResetClient) Do(req *http.Request) (*http.Response, error) {
+ c.checkReset()
+
+ c.mu.RLock()
+ httpClient := c.httpClient
+ c.mu.RUnlock()
+
+ return httpClient.Do(req)
+}
+
+// checkReset checks whether a client reset should be performed, and performs it
+// if so
+func (c *ResetClient) checkReset() {
+ if c.resetInterval == 0 {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if time.Since(c.lastReset) < c.resetInterval {
+ return
+ }
+
+ log.Debug("Resetting HTTP client's connections")
+ c.lastReset = time.Now()
+ // Close idle connections on underlying client. Safe to do while other goroutines use the client.
+ // This is a best effort: if other goroutine(s) are currently using the client,
+ // the related open connection(s) will remain open until the client is GC'ed
+ c.httpClient.CloseIdleConnections()
+ c.httpClient = c.httpClientFactory()
+}
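+
+// Editor's illustrative sketch, not part of the upstream file: a minimal usage
+// example of ResetClient, assuming the caller already prepared an *http.Request
+// named req. Connections are recycled roughly every five minutes.
+//
+//	factory := func() *http.Client { return &http.Client{Timeout: 10 * time.Second} }
+//	client := NewResetClient(5*time.Minute, factory)
+//	resp, err := client.Do(req)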
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/http/helpers.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/helpers.go
new file mode 100644
index 0000000000..2dd70d1b3e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/helpers.go
@@ -0,0 +1,79 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package http
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+)
+
+func parseResponse(res *http.Response, method string, URL string) (string, error) {
+ if res.StatusCode != 200 {
+ return "", fmt.Errorf("status code %d trying to %s %s", res.StatusCode, method, URL)
+ }
+
+ defer res.Body.Close()
+ all, err := io.ReadAll(res.Body)
+ if err != nil {
+ return "", fmt.Errorf("error while reading response from %s: %s", URL, err)
+ }
+
+ return string(all), nil
+}
+
+// Get is a high-level helper that queries a URL and returns its body as a string
+func Get(ctx context.Context, URL string, headers map[string]string, timeout time.Duration, cfg pkgconfigmodel.Reader) (string, error) {
+ client := http.Client{
+ Transport: CreateHTTPTransport(cfg),
+ Timeout: timeout,
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, URL, nil)
+ if err != nil {
+ return "", err
+ }
+
+ for header, value := range headers {
+ req.Header.Add(header, value)
+ }
+
+ res, err := client.Do(req)
+ if err != nil {
+ return "", err
+ }
+
+ return parseResponse(res, "GET", URL)
+}
+
+// Put is a high-level helper that queries a URL using the PUT method and returns its body as a string
+func Put(ctx context.Context, URL string, headers map[string]string, body []byte, timeout time.Duration, cfg pkgconfigmodel.Reader) (string, error) {
+ client := http.Client{
+ Transport: CreateHTTPTransport(cfg),
+ Timeout: timeout,
+ }
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPut, URL, bytes.NewBuffer(body))
+ if err != nil {
+ return "", err
+ }
+
+ for header, value := range headers {
+ req.Header.Add(header, value)
+ }
+
+ res, err := client.Do(req)
+ if err != nil {
+ return "", err
+ }
+
+ return parseResponse(res, "PUT", URL)
+}
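+
+// Editor's illustrative sketch, not part of the upstream file: fetching a URL with
+// the Get helper. The URL is hypothetical and cfg is assumed to be a
+// pkgconfigmodel.Reader obtained from the agent configuration.
+//
+//	body, err := Get(context.Background(), "http://localhost:8080/status",
+//		map[string]string{"Accept": "text/plain"}, 5*time.Second, cfg)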
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/http/token.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/token.go
new file mode 100644
index 0000000000..a0a806659f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/token.go
@@ -0,0 +1,60 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package http
+
+import (
+ "context"
+ "sync"
+ "time"
+)
+
+// APITokenRenewCallback represents the callback type to fetch a token
+type APITokenRenewCallback func(context.Context) (string, time.Time, error)
+
+// APIToken is an API token that is automatically renewed when it expires
+type APIToken struct {
+ Value string
+ ExpirationDate time.Time
+ renewCallback APITokenRenewCallback
+
+ sync.RWMutex
+}
+
+// NewAPIToken returns a new APIToken
+func NewAPIToken(cb APITokenRenewCallback) *APIToken {
+ return &APIToken{
+ renewCallback: cb,
+ }
+}
+
+// Get returns the token value
+func (token *APIToken) Get(ctx context.Context) (string, error) {
+ token.RLock()
+	// The token is still valid, return the cached value
+ if time.Now().Before(token.ExpirationDate) {
+ val := token.Value
+ token.RUnlock()
+ return val, nil
+ }
+ token.RUnlock()
+
+ token.Lock()
+ defer token.Unlock()
+	// The token may have been refreshed by another caller while we waited for the lock
+ if time.Now().Before(token.ExpirationDate) {
+ return token.Value, nil
+ }
+
+ value, expirationDate, err := token.renewCallback(ctx)
+ if err != nil {
+ token.ExpirationDate = time.Now()
+ return "", err
+ }
+
+ token.Value = value
+ token.ExpirationDate = expirationDate
+ return token.Value, nil
+}
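+
+// Editor's illustrative sketch, not part of the upstream file: an APIToken whose
+// renew callback fetches a fresh token and reports its expiration date; fetchToken
+// is a hypothetical helper.
+//
+//	token := NewAPIToken(func(ctx context.Context) (string, time.Time, error) {
+//		value, err := fetchToken(ctx)
+//		return value, time.Now().Add(15 * time.Minute), err
+//	})
+//	value, err := token.Get(ctx) // renews only once the cached token has expired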
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/http/transport.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/transport.go
new file mode 100644
index 0000000000..62d1641e58
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/transport.go
@@ -0,0 +1,219 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package http
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http/httpproxy"
+
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+var (
+ keyLogWriterInit sync.Once
+ keyLogWriter io.Writer
+)
+
+func logSafeURLString(url *url.URL) string {
+ if url == nil {
+ return ""
+ }
+ return url.Scheme + "://" + url.Host
+}
+
+// minTLSVersionFromConfig determines the minimum TLS version defined by the given
+// config, accounting for defaults and deprecated configuration parameters.
+//
+// The returned result is one of the `tls.VersionTLSxxx` constants.
+func minTLSVersionFromConfig(cfg pkgconfigmodel.Reader) uint16 {
+ var min uint16
+ minTLSVersion := cfg.GetString("min_tls_version")
+ switch strings.ToLower(minTLSVersion) {
+ case "tlsv1.0":
+ min = tls.VersionTLS10
+ case "tlsv1.1":
+ min = tls.VersionTLS11
+ case "tlsv1.2":
+ min = tls.VersionTLS12
+ case "tlsv1.3":
+ min = tls.VersionTLS13
+ default:
+ min = tls.VersionTLS12
+ if minTLSVersion != "" {
+ log.Warnf("Invalid `min_tls_version` %#v; using default", minTLSVersion)
+ }
+ }
+ return min
+}
+
+// CreateHTTPTransport creates an *http.Transport for use in the agent
+func CreateHTTPTransport(cfg pkgconfigmodel.Reader) *http.Transport {
+ // It’s OK to reuse the same file for all the http.Transport objects we create
+ // because all the writes to that file are protected by a global mutex.
+ // See https://github.com/golang/go/blob/go1.17.3/src/crypto/tls/common.go#L1316-L1318
+ keyLogWriterInit.Do(func() {
+ sslKeyLogFile := cfg.GetString("sslkeylogfile")
+ if sslKeyLogFile != "" {
+ var err error
+ keyLogWriter, err = os.OpenFile(sslKeyLogFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
+ if err != nil {
+ log.Warnf("Failed to open %s for writing NSS keys: %v", sslKeyLogFile, err)
+ }
+ }
+ })
+
+ tlsConfig := &tls.Config{
+ KeyLogWriter: keyLogWriter,
+ InsecureSkipVerify: cfg.GetBool("skip_ssl_validation"),
+ }
+
+ tlsConfig.MinVersion = minTLSVersionFromConfig(cfg)
+
+ // Most of the following timeouts are a copy of Golang http.DefaultTransport
+ // They are mostly used to act as safeguards in case we forget to add a general
+ // timeout to our http clients. Setting DialContext and TLSClientConfig has the
+ // desirable side-effect of disabling http/2; if removing those fields then
+ // consider the implication of the protocol switch for intakes and other http
+ // servers. See ForceAttemptHTTP2 in https://pkg.go.dev/net/http#Transport.
+
+ var tlsHandshakeTimeout time.Duration
+ if cfg.IsSet("tls_handshake_timeout") {
+ tlsHandshakeTimeout = cfg.GetDuration("tls_handshake_timeout")
+ } else {
+ tlsHandshakeTimeout = 10 * time.Second
+ }
+
+ transport := &http.Transport{
+ TLSClientConfig: tlsConfig,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ // Enables TCP keepalives to detect broken connections
+ KeepAlive: 30 * time.Second,
+ // Disable RFC 6555 Fast Fallback ("Happy Eyeballs")
+ FallbackDelay: -1 * time.Nanosecond,
+ }).DialContext,
+ MaxIdleConns: 100,
+ MaxIdleConnsPerHost: 5,
+ // This parameter is set to avoid connections sitting idle in the pool indefinitely
+ IdleConnTimeout: 45 * time.Second,
+ TLSHandshakeTimeout: tlsHandshakeTimeout,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+
+ if proxies := cfg.GetProxies(); proxies != nil {
+ transport.Proxy = GetProxyTransportFunc(proxies, cfg)
+ }
+
+ return transport
+}
+
+// GetProxyTransportFunc returns a proxy function for an http.Transport that
+// returns the right proxy depending on the configuration.
+func GetProxyTransportFunc(p *pkgconfigmodel.Proxy, cfg pkgconfigmodel.Reader) func(*http.Request) (*url.URL, error) {
+ proxyConfig := &httpproxy.Config{
+ HTTPProxy: p.HTTP,
+ HTTPSProxy: p.HTTPS,
+ NoProxy: strings.Join(p.NoProxy, ","),
+ }
+
+ if cfg.GetBool("no_proxy_nonexact_match") {
+ return func(r *http.Request) (*url.URL, error) {
+ return proxyConfig.ProxyFunc()(r.URL)
+ }
+ }
+
+ return func(r *http.Request) (*url.URL, error) {
+ url, err := func(r *http.Request) (*url.URL, error) {
+ // check no_proxy list first
+ for _, host := range p.NoProxy {
+ if r.URL.Host == host {
+ log.Debugf("URL '%s' matches no_proxy list item '%s': not using any proxy", r.URL, host)
+ return nil, nil
+ }
+ }
+
+ // check proxy by scheme
+ confProxy := ""
+ if r.URL.Scheme == "http" {
+ confProxy = p.HTTP
+ } else if r.URL.Scheme == "https" {
+ confProxy = p.HTTPS
+ } else {
+				log.Warnf("Proxy configuration does not support scheme '%s'", r.URL.Scheme)
+ }
+
+ if confProxy != "" {
+ proxyURL, err := url.Parse(confProxy)
+ if err != nil {
+ err := fmt.Errorf("Could not parse the proxy URL for scheme %s from configuration: %s", r.URL.Scheme, err)
+ log.Error(err.Error())
+ return nil, err
+ }
+ userInfo := ""
+ if proxyURL.User != nil {
+ if _, isSet := proxyURL.User.Password(); isSet {
+ userInfo = "*****:*****@"
+ } else {
+ userInfo = "*****@"
+ }
+ }
+ logSafeURL := r.URL.Scheme + "://" + r.URL.Host
+ log.Debugf("Using proxy %s://%s%s for URL '%s'", proxyURL.Scheme, userInfo, proxyURL.Host, logSafeURL)
+ return proxyURL, nil
+ }
+
+ // no proxy set for this request
+ return nil, nil
+ }(r)
+
+ // Test the new proxy function to see if the behavior will change in the future
+ newURL, _ := proxyConfig.ProxyFunc()(r.URL)
+
+ if url == nil && newURL == nil {
+ return url, err
+ }
+
+ logSafeURL := logSafeURLString(r.URL)
+
+ // Print a warning if the url would ignore the proxy when no_proxy_nonexact_match is true
+ if url != nil && newURL == nil {
+ warnOnce(noProxyIgnoredWarningMap, logSafeURL, "Deprecation warning: the HTTP request to %s uses proxy %s but will ignore the proxy when the Agent configuration option no_proxy_nonexact_match defaults to true in a future agent version. Please adapt the Agent’s proxy configuration accordingly", logSafeURL, url.String())
+ return url, err
+ }
+
+ var newURLString string
+ if newURL != nil {
+ newURLString = newURL.String()
+ }
+
+ // There are no known cases that will trigger the below warnings but because they are logically possible we should still include them.
+
+ // Print a warning if the url does not use the proxy - but will for some reason when no_proxy_nonexact_match is true
+ if url == nil && newURL != nil {
+ warnOnce(noProxyUsedInFuture, logSafeURL, "Deprecation warning: the HTTP request to %s does not use a proxy but will use: %s when the Agent configuration option no_proxy_nonexact_match defaults to true in a future agent version.", logSafeURL, logSafeURLString(newURL))
+ return url, err
+ }
+
+ // Print a warning if the url uses the proxy and still will when no_proxy_nonexact_match is true but for some reason is different
+ if url.String() != newURLString {
+ warnOnce(noProxyChanged, logSafeURL, "Deprecation warning: the HTTP request to %s uses proxy %s but will change to %s when the Agent configuration option no_proxy_nonexact_match defaults to true", logSafeURL, url.String(), logSafeURLString(newURL))
+ return url, err
+ }
+
+ return url, err
+ }
+}
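+
+// Editor's illustrative sketch, not part of the upstream file: building an
+// http.Client on top of CreateHTTPTransport so that the configured proxy, TLS and
+// safeguard timeout settings are honored. cfg is assumed to be a pkgconfigmodel.Reader.
+//
+//	client := &http.Client{
+//		Transport: CreateHTTPTransport(cfg),
+//		Timeout:   30 * time.Second,
+//	}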
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/http/warnings.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/warnings.go
new file mode 100644
index 0000000000..b20ba361af
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/warnings.go
@@ -0,0 +1,80 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package http
+
+import (
+ "sync"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+var (
+	// noProxyIgnoredWarningMap contains the URLs that will ignore the proxy in the future
+	noProxyIgnoredWarningMap = make(map[string]bool)
+
+	// noProxyUsedInFuture contains the URLs that will use a proxy in the future
+	noProxyUsedInFuture = make(map[string]bool)
+
+	// noProxyChanged contains the URLs whose proxy behavior will change in the future
+	noProxyChanged = make(map[string]bool)
+
+	// noProxyMapMutex guards all the no-proxy maps above
+	noProxyMapMutex = sync.Mutex{}
+)
+
+// GetNumberOfWarnings returns the total number of warnings
+func GetNumberOfWarnings() int {
+ noProxyMapMutex.Lock()
+ defer noProxyMapMutex.Unlock()
+
+ return len(noProxyIgnoredWarningMap) + len(noProxyUsedInFuture) + len(noProxyChanged)
+}
+
+// GetProxyIgnoredWarnings returns the list of URLs that will ignore the proxy in the future
+func GetProxyIgnoredWarnings() []string {
+ noProxyMapMutex.Lock()
+ defer noProxyMapMutex.Unlock()
+
+ ignoredWarnings := []string{}
+ for warn := range noProxyIgnoredWarningMap {
+ ignoredWarnings = append(ignoredWarnings, warn)
+ }
+ return ignoredWarnings
+}
+
+// GetProxyUsedInFutureWarnings returns the list of URLs that will use a proxy in the future
+func GetProxyUsedInFutureWarnings() []string {
+ noProxyMapMutex.Lock()
+ defer noProxyMapMutex.Unlock()
+
+ usedInFuture := []string{}
+ for warn := range noProxyUsedInFuture {
+ usedInFuture = append(usedInFuture, warn)
+ }
+ return usedInFuture
+}
+
+// GetProxyChangedWarnings returns the list of URLs whose proxy behavior will change in the future
+func GetProxyChangedWarnings() []string {
+ noProxyMapMutex.Lock()
+ defer noProxyMapMutex.Unlock()
+
+ proxyChanged := []string{}
+ for warn := range noProxyChanged {
+ proxyChanged = append(proxyChanged, warn)
+ }
+
+ return proxyChanged
+}
+
+func warnOnce(warnMap map[string]bool, key string, format string, params ...interface{}) {
+ noProxyMapMutex.Lock()
+ defer noProxyMapMutex.Unlock()
+ if _, ok := warnMap[key]; !ok {
+ warnMap[key] = true
+ log.Warnf(format, params...)
+ }
+}
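+
+// Editor's illustrative sketch, not part of the upstream file: warnOnce deduplicates
+// deprecation warnings per key, so only the first call below emits a log line and
+// the second is a no-op. The URL is hypothetical.
+//
+//	warnOnce(noProxyChanged, "https://example.com", "proxy for %s will change", "https://example.com")
+//	warnOnce(noProxyChanged, "https://example.com", "proxy for %s will change", "https://example.com")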
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/http/warnings_mock.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/warnings_mock.go
new file mode 100644
index 0000000000..3cc61a334e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/http/warnings_mock.go
@@ -0,0 +1,33 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package http
+
+import "testing"
+
+func setupTest(t *testing.T) {
+ t.Cleanup(func() {
+ noProxyIgnoredWarningMap = make(map[string]bool)
+ noProxyUsedInFuture = make(map[string]bool)
+ noProxyChanged = make(map[string]bool)
+ })
+}
+
+// MockWarnings mocks the warnings with provided values
+func MockWarnings(t *testing.T, ignored, usedInFuture, proxyChanged []string) {
+ setupTest(t)
+
+ for _, w := range ignored {
+ noProxyIgnoredWarningMap[w] = true
+ }
+ for _, w := range usedInFuture {
+ noProxyUsedInFuture[w] = true
+ }
+ for _, w := range proxyChanged {
+ noProxyChanged[w] = true
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go
index 92cd71da38..33ee10ae76 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log.go
@@ -59,6 +59,10 @@ type DatadogLogger struct {
l sync.RWMutex
}
+/*
+* Setup and initialization of the logger
+ */
+
// SetupLogger setup agent wide logger
func SetupLogger(i seelog.LoggerInterface, level string) {
logger.Store(setupCommonLogger(i, level))
@@ -72,11 +76,6 @@ func SetupLogger(i seelog.LoggerInterface, level string) {
logsBuffer = []func(){}
}
-// SetupJMXLogger setup JMXfetch specific logger
-func SetupJMXLogger(i seelog.LoggerInterface, level string) {
- jmxLogger.Store(setupCommonLogger(i, level))
-}
-
func setupCommonLogger(i seelog.LoggerInterface, level string) *DatadogLogger {
l := &DatadogLogger{
inner: i,
@@ -108,63 +107,364 @@ func addLogToBuffer(logHandle func()) {
logsBuffer = append(logsBuffer, logHandle)
}
-func (sw *DatadogLogger) replaceInnerLogger(l seelog.LoggerInterface) seelog.LoggerInterface {
- sw.l.Lock()
- defer sw.l.Unlock()
+func (sw *DatadogLogger) scrub(s string) string {
+ if scrubbed, err := scrubBytesFunc([]byte(s)); err == nil {
+ return string(scrubbed)
+ }
+ return s
+}
- old := sw.inner
- sw.inner = l
+/*
+* Operation on the **logger level**
+ */
- return old
+// ChangeLogLevel changes the current log level. Valid levels are trace, debug,
+// info, warn, error, critical and off. It requires a new seelog logger because
+// an existing one cannot be updated.
+func ChangeLogLevel(li seelog.LoggerInterface, level string) error {
+ if err := logger.changeLogLevel(level); err != nil {
+ return err
+ }
+
+ // See detailed explanation in SetupLogger(...)
+ if err := li.SetAdditionalStackDepth(defaultStackDepth); err != nil {
+ return err
+ }
+
+ logger.replaceInnerLogger(li)
+ return nil
}
+func (sw *loggerPointer) changeLogLevel(level string) error {
+ l := sw.Load()
+ if l == nil {
+ return errors.New("cannot change loglevel: logger not initialized")
+ }
-func (sw *DatadogLogger) changeLogLevel(level string) error {
- sw.l.Lock()
- defer sw.l.Unlock()
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ if l.inner == nil {
+ return errors.New("cannot change loglevel: logger is initialized however logger.inner is nil")
+ }
lvl, ok := seelog.LogLevelFromString(strings.ToLower(level))
if !ok {
return errors.New("bad log level")
}
- logger.Load().level = lvl
+ l.level = lvl
return nil
}
+// GetLogLevel returns a seelog native representation of the current log level
+func GetLogLevel() (seelog.LogLevel, error) {
+ return logger.getLogLevel()
+}
+func (sw *loggerPointer) getLogLevel() (seelog.LogLevel, error) {
+ l := sw.Load()
+ if l == nil {
+ return seelog.InfoLvl, errors.New("cannot get loglevel: logger not initialized")
+ }
+
+ l.l.RLock()
+ defer l.l.RUnlock()
+
+ if l.inner == nil {
+ return seelog.InfoLvl, errors.New("cannot get loglevel: logger not initialized")
+ }
+
+ return l.level, nil
+}
+
+// ShouldLog returns whether a given log level should be logged by the default logger
+func ShouldLog(lvl seelog.LogLevel) bool {
+	// The lock stays in the exported function because `shouldLog` is also called from functions that already hold the lock
+ l := logger.Load()
+ if l != nil {
+ l.l.RLock()
+ defer l.l.RUnlock()
+ return l.shouldLog(lvl)
+ }
+ return false
+}
+
+// This function should be called with `sw.l` held
func (sw *DatadogLogger) shouldLog(level seelog.LogLevel) bool {
- sw.l.RLock()
- shouldLog := level >= sw.level
- sw.l.RUnlock()
+ return level >= sw.level
+}
- return shouldLog
+/*
+* Operation on the **logger**
+ */
+
+// RegisterAdditionalLogger registers an additional logger for logging
+func RegisterAdditionalLogger(n string, li seelog.LoggerInterface) error {
+ return logger.registerAdditionalLogger(n, li)
}
+func (sw *loggerPointer) registerAdditionalLogger(n string, li seelog.LoggerInterface) error {
+ l := sw.Load()
+ if l == nil {
+ return errors.New("cannot register: logger not initialized")
+ }
-func (sw *DatadogLogger) registerAdditionalLogger(n string, l seelog.LoggerInterface) error {
- sw.l.Lock()
- defer sw.l.Unlock()
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ if l.inner == nil {
+ return errors.New("cannot register: logger not initialized")
+ }
+
+ if l.extra == nil {
- if sw.extra == nil {
return errors.New("logger not fully initialized, additional logging unavailable")
}
- if _, ok := sw.extra[n]; ok {
+ if _, ok := l.extra[n]; ok {
return errors.New("logger already registered with that name")
}
- sw.extra[n] = l
+ l.extra[n] = li
return nil
}
-func (sw *DatadogLogger) scrub(s string) string {
- if scrubbed, err := scrubBytesFunc([]byte(s)); err == nil {
- return string(scrubbed)
+// ReplaceLogger allows replacing the internal logger, returns old logger
+func ReplaceLogger(li seelog.LoggerInterface) seelog.LoggerInterface {
+ return logger.replaceInnerLogger(li)
+}
+func (sw *loggerPointer) replaceInnerLogger(li seelog.LoggerInterface) seelog.LoggerInterface {
+ l := sw.Load()
+ if l == nil {
+ return nil // Return nil if logger is not initialized
}
- return s
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ if l.inner == nil {
+ return nil // Return nil if logger.inner is not initialized
+ }
+
+ old := l.inner
+ l.inner = li
+
+ return old
+}
+
+// Flush flushes the underlying inner log
+func Flush() {
+ logger.flush()
+ jmxLogger.flush()
+}
+func (sw *loggerPointer) flush() {
+ l := sw.Load()
+ if l == nil {
+ return
+ }
+
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ if l.inner != nil {
+ l.inner.Flush()
+ }
+}
+
+/*
+* log functions
+ */
+
+// log logs a message at the given level, using either bufferFunc (if logging is not yet set up) or
+// scrubAndLogFunc, and treating the variadic args as the message.
+func log(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string), v ...interface{}) {
+ l := logger.Load()
+
+ if l == nil {
+ addLogToBuffer(bufferFunc)
+ return
+ }
+
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ if l.inner == nil {
+ addLogToBuffer(bufferFunc)
+ } else if l.shouldLog(logLevel) {
+ s := BuildLogEntry(v...)
+ scrubAndLogFunc(s)
+ }
+
+}
+func logWithError(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string) error, fallbackStderr bool, v ...interface{}) error {
+ l := logger.Load()
+
+ if l == nil {
+ addLogToBuffer(bufferFunc)
+ err := formatError(v...)
+ if fallbackStderr {
+ fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error())
+ }
+ return err
+ }
+
+ l.l.Lock()
+
+ isInnerNil := l.inner == nil
+
+ if isInnerNil {
+ if !fallbackStderr {
+ addLogToBuffer(bufferFunc)
+ }
+ } else if l.shouldLog(logLevel) {
+ defer l.l.Unlock()
+ s := BuildLogEntry(v...)
+ return scrubAndLogFunc(s)
+ }
+
+ l.l.Unlock()
+
+ err := formatError(v...)
+	// The fallbackStderr check was originally added (PR 6436) to handle a small window
+	// where error messages were lost before the Logger had been initialized. It only
+	// applies to that case: if the error log was suppressed by the log level, it should
+	// not be written to stderr either.
+ if fallbackStderr && isInnerNil {
+ fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error())
+ }
+ return err
+}
+
+/*
+* logFormat functions
+ */
+
+func logFormat(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string, ...interface{}), format string, params ...interface{}) {
+ l := logger.Load()
+
+ if l == nil {
+ addLogToBuffer(bufferFunc)
+ return
+ }
+
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ if l.inner == nil {
+ addLogToBuffer(bufferFunc)
+ } else if l.shouldLog(logLevel) {
+ scrubAndLogFunc(format, params...)
+ }
+}
+func logFormatWithError(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string, ...interface{}) error, format string, fallbackStderr bool, params ...interface{}) error {
+ l := logger.Load()
+
+ if l == nil {
+ addLogToBuffer(bufferFunc)
+ err := formatErrorf(format, params...)
+ if fallbackStderr {
+ fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error())
+ }
+ return err
+ }
+
+ l.l.Lock()
+
+ isInnerNil := l.inner == nil
+
+ if isInnerNil {
+ if !fallbackStderr {
+ addLogToBuffer(bufferFunc)
+ }
+ } else if l.shouldLog(logLevel) {
+ defer l.l.Unlock()
+ return scrubAndLogFunc(format, params...)
+ }
+
+ l.l.Unlock()
+
+ err := formatErrorf(format, params...)
+	// The fallbackStderr check was originally added (PR 6436) to handle a small window
+	// where error messages were lost before the Logger had been initialized. It only
+	// applies to that case: if the error log was suppressed by the log level, it should
+	// not be written to stderr either.
+ if fallbackStderr && isInnerNil {
+ fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error())
+ }
+ return err
+}
+
+/*
+* logContext functions
+ */
+
+func logContext(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string), message string, depth int, context ...interface{}) {
+ l := logger.Load()
+
+ if l == nil {
+ addLogToBuffer(bufferFunc)
+ return
+ }
+
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ if l.inner == nil {
+ addLogToBuffer(bufferFunc)
+ } else if l.shouldLog(logLevel) {
+ l.inner.SetContext(context)
+ l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) //nolint:errcheck
+ scrubAndLogFunc(message)
+ l.inner.SetContext(nil)
+ l.inner.SetAdditionalStackDepth(defaultStackDepth) //nolint:errcheck
+ }
+}
+func logContextWithError(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string) error, message string, fallbackStderr bool, depth int, context ...interface{}) error {
+ l := logger.Load()
+
+ if l == nil {
+ addLogToBuffer(bufferFunc)
+ err := formatErrorc(message, context...)
+ if fallbackStderr {
+ fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error())
+ }
+ return err
+ }
+
+ l.l.Lock()
+
+ isInnerNil := l.inner == nil
+
+ if isInnerNil {
+ if !fallbackStderr {
+ addLogToBuffer(bufferFunc)
+ }
+ } else if l.shouldLog(logLevel) {
+ l.inner.SetContext(context)
+ l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) //nolint:errcheck
+ err := scrubAndLogFunc(message)
+ l.inner.SetContext(nil)
+ l.inner.SetAdditionalStackDepth(defaultStackDepth) //nolint:errcheck
+ defer l.l.Unlock()
+ return err
+ }
+
+ l.l.Unlock()
+
+ err := formatErrorc(message, context...)
+ if fallbackStderr && isInnerNil {
+ fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error())
+ }
+ return err
}
// trace logs at the trace level, called with sw.l held
func (sw *loggerPointer) trace(s string) {
l := sw.Load()
+
+ if l == nil {
+ return
+ }
+
scrubbed := l.scrub(s)
l.inner.Trace(scrubbed)
@@ -178,6 +478,7 @@ func (sw *loggerPointer) trace(s string) {
func (sw *loggerPointer) traceStackDepth(s string, depth int) {
l := sw.Load()
scrubbed := l.scrub(s)
+
l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) //nolint:errcheck
l.inner.Trace(scrubbed)
l.inner.SetAdditionalStackDepth(defaultStackDepth) //nolint:errcheck
@@ -390,14 +691,6 @@ func (sw *loggerPointer) criticalf(format string, params ...interface{}) error {
return err
}
-// getLogLevel returns the current log level
-func (sw *DatadogLogger) getLogLevel() seelog.LogLevel {
- sw.l.RLock()
- defer sw.l.RUnlock()
-
- return sw.level
-}
-
// BuildLogEntry concatenates all inputs with spaces
func BuildLogEntry(v ...interface{}) string {
var fmtBuffer bytes.Buffer
@@ -448,110 +741,6 @@ func formatErrorc(message string, context ...interface{}) error {
return errors.New(scrubMessage(msg))
}
-// log logs a message at the given level, using either bufferFunc (if logging is not yet set up) or
-// scrubAndLogFunc, and treating the variadic args as the message.
-func log(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string), v ...interface{}) {
- l := logger.Load()
- if l != nil && l.inner != nil && l.shouldLog(logLevel) {
- s := BuildLogEntry(v...)
- l.l.Lock()
- defer l.l.Unlock()
- scrubAndLogFunc(s)
- } else if l == nil || l.inner == nil {
- addLogToBuffer(bufferFunc)
- }
-}
-
-func logWithError(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string) error, fallbackStderr bool, v ...interface{}) error {
- l := logger.Load()
- if l != nil && l.inner != nil && l.shouldLog(logLevel) {
- s := BuildLogEntry(v...)
- l.l.Lock()
- defer l.l.Unlock()
- return scrubAndLogFunc(s)
- } else if l == nil || l.inner == nil {
- addLogToBuffer(bufferFunc)
- }
- err := formatError(v...)
-
- // Originally (PR 6436) fallbackStderr check had been added to handle a small window
- // where error messages had been lost before Logger had been initialized. Adjusting
- // just for that case because if the error log should not be logged - because it has
- // been suppressed then it should be taken into account.
- if fallbackStderr && (l == nil || l.inner == nil) {
- fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error())
- }
- return err
-}
-
-func logFormat(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string, ...interface{}), format string, params ...interface{}) {
- l := logger.Load()
- if l != nil && l.inner != nil && l.shouldLog(logLevel) {
- l.l.Lock()
- defer l.l.Unlock()
- scrubAndLogFunc(format, params...)
- } else if l == nil || l.inner == nil {
- addLogToBuffer(bufferFunc)
- }
-}
-
-func logFormatWithError(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string, ...interface{}) error, format string, fallbackStderr bool, params ...interface{}) error {
- l := logger.Load()
- if l != nil && l.inner != nil && l.shouldLog(logLevel) {
- l.l.Lock()
- defer l.l.Unlock()
- return scrubAndLogFunc(format, params...)
- } else if l == nil || l.inner == nil {
- addLogToBuffer(bufferFunc)
- }
- err := formatErrorf(format, params...)
-
- // Originally (PR 6436) fallbackStderr check had been added to handle a small window
- // where error messages had been lost before Logger had been initialized. Adjusting
- // just for that case because if the error log should not be logged - because it has
- // been suppressed then it should be taken into account.
- if fallbackStderr && (l == nil || l.inner == nil) {
- fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error())
- }
- return err
-}
-
-func logContext(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string), message string, depth int, context ...interface{}) {
- l := logger.Load()
- if l != nil && l.inner != nil && l.shouldLog(logLevel) {
- l.l.Lock()
- defer l.l.Unlock()
- l.inner.SetContext(context)
- l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) //nolint:errcheck
- scrubAndLogFunc(message)
- l.inner.SetContext(nil)
- l.inner.SetAdditionalStackDepth(defaultStackDepth) //nolint:errcheck
- } else if l == nil || l.inner == nil {
- addLogToBuffer(bufferFunc)
- }
-}
-
-func logContextWithError(logLevel seelog.LogLevel, bufferFunc func(), scrubAndLogFunc func(string) error, message string, fallbackStderr bool, depth int, context ...interface{}) error {
- l := logger.Load()
- if l != nil && l.inner != nil && l.shouldLog(logLevel) {
- l.l.Lock()
- defer l.l.Unlock()
- l.inner.SetContext(context)
- l.inner.SetAdditionalStackDepth(defaultStackDepth + depth) //nolint:errcheck
- err := scrubAndLogFunc(message)
- l.inner.SetContext(nil)
- l.inner.SetAdditionalStackDepth(defaultStackDepth) //nolint:errcheck
- return err
- } else if l == nil || l.inner == nil {
- addLogToBuffer(bufferFunc)
- }
- err := formatErrorc(message, context...)
- if fallbackStderr {
- fmt.Fprintf(os.Stderr, "%s: %s\n", logLevel.String(), err.Error())
- }
- return err
-}
-
// Trace logs at the trace level
func Trace(v ...interface{}) {
log(seelog.TraceLvl, func() { Trace(v...) }, logger.trace, v...)
@@ -723,7 +912,7 @@ func ErrorfStackDepth(depth int, format string, params ...interface{}) error {
msg := fmt.Sprintf(format, params...)
return logWithError(seelog.ErrorLvl, func() { ErrorStackDepth(depth, msg) }, func(s string) error {
return logger.errorStackDepth(s, depth)
- }, false, msg)
+ }, true, msg)
}
// ErrorcStackDepth logs at the error level with context and the current stack depth plus the additional given one and returns an error containing the formated log message
@@ -822,6 +1011,10 @@ func CriticalStackDepth(depth int, v ...interface{}) error {
}, true, v...)
}
+/*
+* JMX Logger Section
+ */
+
// JMXError Logs for JMX check
func JMXError(v ...interface{}) error {
return logWithError(seelog.ErrorLvl, func() { JMXError(v...) }, jmxLogger.error, true, v...)
@@ -832,78 +1025,7 @@ func JMXInfo(v ...interface{}) {
log(seelog.InfoLvl, func() { JMXInfo(v...) }, jmxLogger.info, v...)
}
-// Flush flushes the underlying inner log
-func Flush() {
- l := logger.Load()
- if l != nil && l.inner != nil {
- l.inner.Flush()
- }
- l = jmxLogger.Load()
- if l != nil && l.inner != nil {
- l.inner.Flush()
- }
-}
-
-// ReplaceLogger allows replacing the internal logger, returns old logger
-func ReplaceLogger(li seelog.LoggerInterface) seelog.LoggerInterface {
- l := logger.Load()
- if l != nil && l.inner != nil {
- return l.replaceInnerLogger(li)
- }
-
- return nil
-}
-
-// RegisterAdditionalLogger registers an additional logger for logging
-func RegisterAdditionalLogger(n string, li seelog.LoggerInterface) error {
- l := logger.Load()
- if l != nil && l.inner != nil {
- return l.registerAdditionalLogger(n, li)
- }
-
- return errors.New("cannot register: logger not initialized")
-}
-
-// ShouldLog returns whether a given log level should be logged by the default logger
-func ShouldLog(lvl seelog.LogLevel) bool {
- l := logger.Load()
- if l != nil {
- return l.shouldLog(lvl)
- }
- return false
-}
-
-// GetLogLevel returns a seelog native representation of the current
-// log level
-func GetLogLevel() (seelog.LogLevel, error) {
- l := logger.Load()
- if l != nil && l.inner != nil {
- return l.getLogLevel(), nil
- }
-
- // need to return something, just set to Info (expected default)
- return seelog.InfoLvl, errors.New("cannot get loglevel: logger not initialized")
-}
-
-// ChangeLogLevel changes the current log level, valid levels are trace, debug,
-// info, warn, error, critical and off, it requires a new seelog logger because
-// an existing one cannot be updated
-func ChangeLogLevel(li seelog.LoggerInterface, level string) error {
- l := logger.Load()
- if l != nil && l.inner != nil {
- err := l.changeLogLevel(level)
- if err != nil {
- return err
- }
- // See detailed explanation in SetupLogger(...)
- err = li.SetAdditionalStackDepth(defaultStackDepth)
- if err != nil {
- return err
- }
-
- l.replaceInnerLogger(li)
- return nil
- }
- // need to return something, just set to Info (expected default)
- return errors.New("cannot change loglevel: logger not initialized")
+// SetupJMXLogger sets up the JMXfetch-specific logger
+func SetupJMXLogger(i seelog.LoggerInterface, level string) {
+ jmxLogger.Store(setupCommonLogger(i, level))
}
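+
+// Editor's illustrative sketch, not part of the upstream file: changing the log
+// level at runtime from a consuming package requires a freshly built seelog logger,
+// because an existing one cannot be updated. seelogConfig is a hypothetical
+// configuration string.
+//
+//	newLogger, _ := seelog.LoggerFromConfigAsString(seelogConfig)
+//	if err := log.ChangeLogLevel(newLogger, "debug"); err != nil {
+//		log.Warnf("could not change log level: %v", err)
+//	}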
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_limit.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_limit.go
new file mode 100644
index 0000000000..290382f333
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_limit.go
@@ -0,0 +1,74 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package log
+
+import (
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+// Limit is a utility that can be used to avoid logging noisily
+type Limit struct {
+	// n is the number of times remaining that ShouldLog will return true.
+	// We repeatedly subtract 1 from it while it is nonzero.
+ n *atomic.Int32
+
+	// exit and ticker must be different channels
+	// because stopping a ticker does not close the ticker channel,
+	// and we would otherwise leak memory
+ ticker *time.Ticker
+ exit chan struct{}
+}
+
+// NewLogLimit creates a Limit where ShouldLog will return
+// true the first n times it is called, and will return true once every
+// interval thereafter.
+func NewLogLimit(n int, interval time.Duration) *Limit {
+ l := &Limit{
+ n: atomic.NewInt32(int32(n)),
+ ticker: time.NewTicker(interval),
+ exit: make(chan struct{}),
+ }
+
+ go l.resetLoop()
+ return l
+}
+
+// ShouldLog returns true if the caller should log
+func (l *Limit) ShouldLog() bool {
+ n := l.n.Load()
+ if n > 0 {
+ // try to decrement n, doing nothing on concurrent attempts
+ l.n.CompareAndSwap(n, n-1)
+ return true
+ }
+
+ return false
+}
+
+// Close will stop the underlying ticker
+func (l *Limit) Close() {
+ l.ticker.Stop()
+ close(l.exit)
+}
+
+func (l *Limit) resetLoop() {
+ for {
+ select {
+ case <-l.ticker.C:
+ l.resetCounter()
+ case <-l.exit:
+ return
+ }
+ }
+}
+
+func (l *Limit) resetCounter() {
+	// If l.n == 0, we have gotten through the first few logs, and after the ticker fires
+	// we should allow another log
+ l.n.CompareAndSwap(0, 1)
+}
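+
+// Editor's illustrative sketch, not part of the upstream file: a limiter, used from a
+// consuming package, that allows the first 10 occurrences of a noisy message and then
+// at most one per ticker interval. err is assumed to come from the surrounding code.
+//
+//	limiter := log.NewLogLimit(10, time.Minute)
+//	defer limiter.Close()
+//	if limiter.ShouldLog() {
+//		log.Warnf("dropping payload: %v", err)
+//	}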
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_test_init.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_test_init.go
new file mode 100644
index 0000000000..7dd6bcd7e3
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/log/log_test_init.go
@@ -0,0 +1,22 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package log
+
+import (
+ "os"
+
+ "github.com/cihub/seelog"
+)
+
+func init() {
+ level := os.Getenv("DD_LOG_LEVEL")
+ if level == "" {
+ level = "debug"
+ }
+ SetupLogger(seelog.Default, level)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/optional/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/optional/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/optional/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/optional/optional.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/optional/optional.go
new file mode 100644
index 0000000000..aa0437139d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/optional/optional.go
@@ -0,0 +1,92 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package optional has optional types and functions used by the Agent.
+package optional
+
+// Option represents an optional type.
+// By default, no value is set and a call to Get() returns (T{}, false)
+type Option[T any] struct {
+ value T
+ set bool
+}
+
+// NewOption creates a new instance of Option[T] with a value set. A call to Get() will return (value, true).
+func NewOption[T any](value T) Option[T] {
+ return Option[T]{
+ value: value,
+ set: true,
+ }
+}
+
+// NewOptionPtr creates a new instance of Option[T] with a value set. A call to Get() will return (value, true).
+func NewOptionPtr[T any](value T) *Option[T] {
+ option := NewOption[T](value)
+ return &option
+}
+
+// NewNoneOption creates a new instance of Option[T] without any value set.
+func NewNoneOption[T any]() Option[T] {
+ return Option[T]{}
+}
+
+// NewNoneOptionPtr creates a new instance of Option[T] without any value set.
+func NewNoneOptionPtr[T any]() *Option[T] {
+ option := NewNoneOption[T]()
+ return &option
+}
+
+// Get returns the value and true if a value is set; otherwise it returns (the zero value of T, false).
+func (o *Option[T]) Get() (T, bool) {
+ return o.value, o.set
+}
+
+// Set sets a new value.
+func (o *Option[T]) Set(value T) {
+ o.value = value
+ o.set = true
+}
+
+// Reset removes the value set.
+func (o *Option[T]) Reset() {
+ o.set = false
+}
+
+// MapOption returns fct(value) if a value is set, otherwise it returns NewNoneOption[T2]().
+func MapOption[T1 any, T2 any](optional Option[T1], fct func(T1) T2) Option[T2] {
+ value, ok := optional.Get()
+ if !ok {
+ return NewNoneOption[T2]()
+ }
+ return NewOption(fct(value))
+}
+
+// UnmarshalYAML unmarshals an Option[T] from YAML
+func (o *Option[T]) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var v T
+ err := unmarshal(&v)
+ if err != nil {
+ *o = NewNoneOption[T]()
+ return err
+ }
+ *o = NewOption[T](v)
+ return nil
+}
+
+// SetIfNone sets the value if it is not already set.
+// Does nothing if the current instance is already set.
+func (o *Option[T]) SetIfNone(value T) {
+ if !o.set {
+ o.Set(value)
+ }
+}
+
+// SetOptionIfNone sets the option if it is not already set.
+// Does nothing if the current instance is already set.
+func (o *Option[T]) SetOptionIfNone(option Option[T]) {
+ if !o.set {
+ *o = option
+ }
+}
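
For orientation, a minimal usage sketch of the optional package vendored above. The package main wrapper, the example values, and the github.com/DataDog/datadog-agent/pkg/util/optional import path are illustrative assumptions, not part of this change:

package main

import (
    "fmt"

    "github.com/DataDog/datadog-agent/pkg/util/optional"
)

func main() {
    // A set Option returns its value and true.
    port := optional.NewOption(8126)
    fmt.Println(port.Get()) // 8126 true

    // The zero value behaves like NewNoneOption: Get reports false until a value is set.
    var timeout optional.Option[int]
    timeout.SetIfNone(30)
    fmt.Println(timeout.Get()) // 30 true

    // MapOption applies the function only when a value is present.
    doubled := optional.MapOption(port, func(p int) int { return p * 2 })
    fmt.Println(doubled.Get()) // 16252 true
}
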
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/default.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/default.go
index d054c18569..6de68ff0e4 100644
--- a/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/default.go
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/scrubber/default.go
@@ -8,6 +8,7 @@ package scrubber
import (
"fmt"
"regexp"
+ "slices"
"strings"
"sync"
)
@@ -135,7 +136,7 @@ func AddDefaultReplacers(scrubber *Scrubber) {
return ""
}
if len(apiKey) == 32 {
- return "***************************" + apiKey[len(apiKey)-5:]
+ return HideKeyExceptLastFiveChars(apiKey)
}
}
return defaultReplacement
@@ -151,7 +152,7 @@ func AddDefaultReplacers(scrubber *Scrubber) {
return ""
}
if len(appKey) == 40 {
- return "***********************************" + appKey[len(appKey)-5:]
+ return HideKeyExceptLastFiveChars(appKey)
}
}
return defaultReplacement
@@ -327,9 +328,25 @@ func ScrubLine(url string) string {
return DefaultScrubber.ScrubLine(url)
}
+// HideKeyExceptLastFiveChars replaces all characters in the key with "*", except
+// for the last 5 characters. If the key is an unrecognized length, replace
+// all of it with the default string of "*"s instead.
+func HideKeyExceptLastFiveChars(key string) string {
+ if len(key) != 32 && len(key) != 40 {
+ return defaultReplacement
+ }
+ return strings.Repeat("*", len(key)-5) + key[len(key)-5:]
+}
+
// AddStrippedKeys adds to the set of YAML keys that will be recognized and have their values stripped. This modifies
// the DefaultScrubber directly and be added to any created scrubbers.
func AddStrippedKeys(strippedKeys []string) {
+ // API and APP keys are already handled by default rules
+ strippedKeys = slices.Clone(strippedKeys)
+ strippedKeys = slices.DeleteFunc(strippedKeys, func(s string) bool {
+ return s == "api_key" || s == "app_key"
+ })
+
if len(strippedKeys) > 0 {
replacer := matchYAMLKey(
fmt.Sprintf("(%s)", strings.Join(strippedKeys, "|")),
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/parallel_stopper.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/parallel_stopper.go
new file mode 100644
index 0000000000..431fdaec1d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/parallel_stopper.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package startstop
+
+import (
+ "sync"
+)
+
+// parallelStopper stops a set of components in parallel.
+type parallelStopper struct {
+ components []Stoppable
+}
+
+var _ Stopper = &parallelStopper{}
+var _ Stoppable = &parallelStopper{}
+
+// NewParallelStopper returns a new parallel stopper.
+//
+// The Stop() method of this object will stop all components concurrently,
+// calling each component's Stop method in a dedicated goroutine. It will
+// return only when all Stop calls have completed.
+//
+// Any components included in the arguments will be included in the
+// set of components, as if stopper.Add(..) had been called for each.
+func NewParallelStopper(components ...Stoppable) Stopper {
+ return &parallelStopper{
+ components: components,
+ }
+}
+
+// Add implements Stopper#Add.
+func (g *parallelStopper) Add(components ...Stoppable) {
+ g.components = append(g.components, components...)
+}
+
+// Stop implements Stoppable#Stop.
+func (g *parallelStopper) Stop() {
+ wg := &sync.WaitGroup{}
+ for _, component := range g.components {
+ wg.Add(1)
+ go func(s Stoppable) {
+ s.Stop()
+ wg.Done()
+ }(component)
+ }
+ wg.Wait()
+}
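
A hedged sketch of NewParallelStopper in use; the slowWorker type and its names are hypothetical, and the import path is assumed:

package main

import (
    "fmt"
    "time"

    "github.com/DataDog/datadog-agent/pkg/util/startstop"
)

// slowWorker is a hypothetical component with a Stop method.
type slowWorker struct{ name string }

func (w *slowWorker) Stop() {
    time.Sleep(50 * time.Millisecond) // pretend to do shutdown work
    fmt.Println("stopped", w.name)
}

func main() {
    stopper := startstop.NewParallelStopper(&slowWorker{name: "sender"})
    stopper.Add(&slowWorker{name: "listener"}, &slowWorker{name: "processor"})

    // Stop runs each component's Stop in its own goroutine and returns
    // only once all of them have finished.
    stopper.Stop()
}
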
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/serial_stopper.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/serial_stopper.go
new file mode 100644
index 0000000000..7578f9b46c
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/serial_stopper.go
@@ -0,0 +1,39 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package startstop
+
+// serialStopper stops a set of components in series.
+type serialStopper struct {
+ components []Stoppable
+}
+
+var _ Stopper = &serialStopper{}
+var _ Stoppable = &serialStopper{}
+
+// NewSerialStopper returns a new serial stopper.
+//
+// The Stop() method of this object will stop all components, one
+// by one, in the order they were added.
+//
+// Any components included in the arguments will be included in the
+// set of components, as if stopper.Add(..) had been called for each.
+func NewSerialStopper(components ...Stoppable) Stopper {
+ return &serialStopper{
+ components: components,
+ }
+}
+
+// Add implements Stopper#Add.
+func (g *serialStopper) Add(components ...Stoppable) {
+ g.components = append(g.components, components...)
+}
+
+// Stop implements Stoppable#Stop.
+func (g *serialStopper) Stop() {
+ for _, stopper := range g.components {
+ stopper.Stop()
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/start.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/start.go
new file mode 100644
index 0000000000..b1aedd850b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/start.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package startstop
+
+// Startable represents a startable object
+type Startable interface {
+ Start()
+}
+
+// Starter starts a group of startable objects from a data pipeline
+type Starter interface {
+ Startable
+ Add(components ...Startable)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/starter.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/starter.go
new file mode 100644
index 0000000000..42c9f1abd0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/starter.go
@@ -0,0 +1,39 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package startstop
+
+// starter starts a set of components in series.
+type starter struct {
+ components []Startable
+}
+
+var _ Starter = &starter{}
+var _ Startable = &starter{}
+
+// NewStarter returns a new serial starter.
+//
+// The Start() method of this object will start all components, one
+// by one, in the order they were added.
+//
+// Any components included in the arguments will be included in the
+// set of components, as if starter.Add(..) had been called for each.
+func NewStarter(components ...Startable) Starter {
+ return &starter{
+ components: components,
+ }
+}
+
+// Add implements Starter#Add.
+func (s *starter) Add(components ...Startable) {
+ s.components = append(s.components, components...)
+}
+
+// Start implements Startable#Start.
+func (s *starter) Start() {
+ for _, c := range s.components {
+ c.Start()
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/startstop.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/startstop.go
new file mode 100644
index 0000000000..ae2f0101ed
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/startstop.go
@@ -0,0 +1,18 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package startstop provides useful functionality for starting and stopping agent
+// components.
+//
+// The Startable and Stoppable interfaces define components that can be started and
+// stopped, respectively. The package then provides utility functionality to start
+// and stop components either concurrently or in series.
+package startstop
+
+// StartStoppable represents a startable and stoppable object
+type StartStoppable interface {
+ Startable
+ Stoppable
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/stop.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/stop.go
new file mode 100644
index 0000000000..01390badf0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/startstop/stop.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package startstop
+
+// Stoppable represents a stoppable object
+type Stoppable interface {
+ Stop()
+}
+
+// Stopper stops a group of stoppable objects from a data pipeline
+type Stopper interface {
+ Stoppable
+ Add(components ...Stoppable)
+}
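
Taken together, the startstop interfaces compose as in the sketch below: a hypothetical pipelineStage implements both Start and Stop, is started in order, and is stopped in reverse order; the import path is assumed:

package main

import (
    "fmt"

    "github.com/DataDog/datadog-agent/pkg/util/startstop"
)

// pipelineStage is a hypothetical component implementing StartStoppable.
type pipelineStage struct{ name string }

func (p *pipelineStage) Start() { fmt.Println("start", p.name) }
func (p *pipelineStage) Stop()  { fmt.Println("stop", p.name) }

func main() {
    input := &pipelineStage{name: "input"}
    output := &pipelineStage{name: "output"}

    starter := startstop.NewStarter(input, output)
    stopper := startstop.NewSerialStopper(output, input) // stop in reverse order

    starter.Start() // start input, start output
    stopper.Stop()  // stop output, stop input
}
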
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/statstracker/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/statstracker/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/statstracker/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/statstracker/stats_tracker.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/statstracker/stats_tracker.go
new file mode 100644
index 0000000000..e25543ea5e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/statstracker/stats_tracker.go
@@ -0,0 +1,185 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package statstracker keeps track of simple stats in the Agent.
+package statstracker
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+type timeProvider func() int64
+
+type taggedPoint struct {
+ timeStamp int64
+ value int64
+ count int64
+}
+
+// Tracker keeps track of simple stats over its lifetime and over a configurable time range.
+// Tracker is designed to be memory efficient by aggregating data into buckets. For example,
+// a time frame of 24 hours with a bucketFrame of 1 hour ensures that only 24 points are ever
+// kept in memory. New data is reflected in the stats immediately, while old data is removed by
+// dropping expired aggregated buckets.
+type Tracker struct {
+ allTimeAvg int64
+ allTimePeak int64
+ totalPoints int64
+ timeFrame int64
+ bucketFrame int64
+ avgPointsHead *taggedPoint
+ peakPointsHead *taggedPoint
+ aggregatedAvgPoints []*taggedPoint
+ aggregatedPeakPoints []*taggedPoint
+ timeProvider timeProvider
+ lock *sync.Mutex
+}
+
+// NewTracker Creates a new Tracker instance
+func NewTracker(timeFrame time.Duration, bucketSize time.Duration) *Tracker {
+ return NewTrackerWithTimeProvider(timeFrame, bucketSize, func() int64 {
+ return time.Now().UnixNano()
+ })
+}
+
+// NewTrackerWithTimeProvider Creates a new Tracker instance with a time provider closure (mostly for testing)
+func NewTrackerWithTimeProvider(timeFrame time.Duration, bucketSize time.Duration, timeProvider timeProvider) *Tracker {
+ return &Tracker{
+ aggregatedAvgPoints: make([]*taggedPoint, 0),
+ aggregatedPeakPoints: make([]*taggedPoint, 0),
+ timeFrame: int64(timeFrame),
+ bucketFrame: int64(bucketSize),
+ timeProvider: timeProvider,
+ lock: &sync.Mutex{},
+ }
+}
+
+// Add Records a new value to the stats tracker
+func (s *Tracker) Add(value int64) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.allTimeAvg = (s.totalPoints*s.allTimeAvg + value) / (s.totalPoints + 1)
+ s.totalPoints++
+
+ if value > s.allTimePeak {
+ s.allTimePeak = value
+ }
+
+ now := s.timeProvider()
+
+ s.dropOldPoints(now)
+
+ if s.avgPointsHead == nil {
+ s.avgPointsHead = &taggedPoint{now, value, 0}
+ s.peakPointsHead = &taggedPoint{now, value, 0}
+ } else if s.peakPointsHead.value < value {
+ s.peakPointsHead.value = value
+ }
+
+ // We initialized avgPointsHead with the first value, don't count it twice
+ if s.avgPointsHead.count > 0 {
+ s.avgPointsHead.value = (s.avgPointsHead.count*s.avgPointsHead.value + value) / (s.avgPointsHead.count + 1)
+ }
+ s.avgPointsHead.count++
+}
+
+// AllTimeAvg Gets the all time average of values seen so far
+func (s *Tracker) AllTimeAvg() int64 {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ return s.allTimeAvg
+}
+
+// MovingAvg Gets the moving average of values within the time frame
+func (s *Tracker) MovingAvg() int64 {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.dropOldPoints(s.timeProvider())
+
+ if s.avgPointsHead == nil {
+ return 0
+ }
+ sum := s.avgPointsHead.value * s.avgPointsHead.count
+ count := s.avgPointsHead.count
+ for _, v := range s.aggregatedAvgPoints {
+ sum += v.value * v.count
+ count += v.count
+ }
+ return sum / count
+}
+
+// AllTimePeak Gets the largest value seen so far
+func (s *Tracker) AllTimePeak() int64 {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ return s.allTimePeak
+}
+
+// MovingPeak Gets the largest value seen within the time frame
+func (s *Tracker) MovingPeak() int64 {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.dropOldPoints(s.timeProvider())
+
+ if s.peakPointsHead == nil {
+ return 0
+ }
+ largest := s.peakPointsHead.value
+ for _, v := range s.aggregatedPeakPoints {
+ if v.value > largest {
+ largest = v.value
+ }
+ }
+ return largest
+}
+
+func (s *Tracker) dropOldPoints(now int64) {
+ if s.avgPointsHead != nil && s.avgPointsHead.timeStamp < now-s.bucketFrame {
+ // Pop off the oldest values
+ if len(s.aggregatedAvgPoints) > 0 {
+ dropFromIndex := 0
+ for _, v := range s.aggregatedAvgPoints {
+ if v.timeStamp > now-s.timeFrame {
+ break
+ }
+ dropFromIndex++
+ }
+
+ s.aggregatedAvgPoints = s.aggregatedAvgPoints[dropFromIndex:]
+ s.aggregatedPeakPoints = s.aggregatedPeakPoints[dropFromIndex:]
+ }
+
+ // Add the new aggregated point to the slice
+ s.aggregatedAvgPoints = append(s.aggregatedAvgPoints, s.avgPointsHead)
+ s.aggregatedPeakPoints = append(s.aggregatedPeakPoints, s.peakPointsHead)
+ s.avgPointsHead = nil
+ s.peakPointsHead = nil
+ }
+}
+
+// InfoKey returns the key under which this tracker's stats are reported.
+func (s *Tracker) InfoKey() string {
+ return "Pipeline Latency"
+}
+
+// Info returns the Tracker as a formatted string slice.
+func (s *Tracker) Info() []string {
+ AllTimeAvgLatency := s.AllTimeAvg() / int64(time.Millisecond)
+ AllTimePeakLatency := s.AllTimePeak() / int64(time.Millisecond)
+ RecentAvgLatency := s.MovingAvg() / int64(time.Millisecond)
+ RecentPeakLatency := s.MovingPeak() / int64(time.Millisecond)
+
+ return []string{
+ fmt.Sprintf("Average Latency (ms): %d", AllTimeAvgLatency),
+ fmt.Sprintf("24h Average Latency (ms): %d", RecentAvgLatency),
+ fmt.Sprintf("Peak Latency (ms): %d", AllTimePeakLatency),
+ fmt.Sprintf("24h Peak Latency (ms): %d", RecentPeakLatency),
+ }
+}
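
A minimal sketch of the Tracker API vendored above, assuming latency samples are recorded in nanoseconds (which matches Info's millisecond conversion); package main and the import path are illustrative assumptions:

package main

import (
    "fmt"
    "time"

    "github.com/DataDog/datadog-agent/pkg/util/statstracker"
)

func main() {
    // Keep 24 hours of data aggregated into 1-hour buckets, as in the type's doc comment.
    latency := statstracker.NewTracker(24*time.Hour, time.Hour)

    // Record a few latency samples, expressed in nanoseconds.
    latency.Add(int64(120 * time.Millisecond))
    latency.Add(int64(80 * time.Millisecond))
    latency.Add(int64(200 * time.Millisecond))

    fmt.Println("all-time avg (ns):", latency.AllTimeAvg())
    fmt.Println("moving peak (ns): ", latency.MovingPeak())
    for _, line := range latency.Info() {
        fmt.Println(line)
    }
}
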
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu.go
new file mode 100644
index 0000000000..83084b7811
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu.go
@@ -0,0 +1,61 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package system provides various helper functions and types to interact with system information
+package system
+
+import (
+ "context"
+ "runtime"
+ "sync"
+ "time"
+
+ "go.uber.org/atomic"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+const (
+ maxHostCPUFailedAttempts = 3
+)
+
+var (
+ hostCPUCount = atomic.NewInt64(0)
+ hostCPUFailedAttempts int
+ hostCPUCountUpdateLock sync.Mutex
+ cpuInfoFunc func(context.Context, bool) (int, error)
+)
+
+// HostCPUCount returns the number of logical CPUs from host
+func HostCPUCount() int {
+ if v := hostCPUCount.Load(); v != 0 {
+ return int(v)
+ }
+
+ hostCPUCountUpdateLock.Lock()
+ defer hostCPUCountUpdateLock.Unlock()
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ cpuCount, err := cpuInfoFunc(ctx, true)
+ if err != nil {
+ hostCPUFailedAttempts++
+ log.Debugf("Unable to get host cpu count, err: %v", err)
+
+ // Fall back to runtime.NumCPU(), which is accurate in most cases, to
+ // preserve backward compatibility. After the maximum number of failed
+ // attempts, we give up and cache that fallback value.
+ if hostCPUFailedAttempts >= maxHostCPUFailedAttempts {
+ log.Debugf("Permafail while getting host cpu count, will use runtime.NumCPU(), err: %v", err)
+ cpuCount = runtime.NumCPU()
+ } else {
+ return runtime.NumCPU()
+ }
+ }
+ hostCPUCount.Store(int64(cpuCount))
+
+ return cpuCount
+}
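
Usage is a single call; the sketch below assumes the vendored import path and only illustrates the caching behavior implemented above:

package main

import (
    "fmt"

    "github.com/DataDog/datadog-agent/pkg/util/system"
)

func main() {
    // The first call queries the platform-specific cpuInfoFunc (gopsutil on
    // non-Windows, runtime.NumCPU on Windows) and caches the result; later
    // calls return the cached value.
    fmt.Println("host logical CPUs:", system.HostCPUCount())
}
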
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu_mock.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu_mock.go
new file mode 100644
index 0000000000..92c0794c9f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu_mock.go
@@ -0,0 +1,17 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build test
+
+package system
+
+const (
+ // Arbitrary CPU count used for unit tests
+ defaultCPUCountUnitTest = 3
+)
+
+func init() {
+ hostCPUCount.Store(defaultCPUCountUnitTest)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu_unix.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu_unix.go
new file mode 100644
index 0000000000..c963b5e914
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu_unix.go
@@ -0,0 +1,16 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !windows
+
+package system
+
+import (
+ "github.com/shirou/gopsutil/v3/cpu"
+)
+
+func init() {
+ cpuInfoFunc = cpu.CountsWithContext
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu_windows.go
new file mode 100644
index 0000000000..cbf8a28ca8
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/cpu_windows.go
@@ -0,0 +1,19 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package system
+
+import (
+ "context"
+ "runtime"
+)
+
+func init() {
+ // TODO: Implement a proper CPU count for Windows too,
+ // as runtime.NumCPU() supports Windows CPU affinity
+ cpuInfoFunc = func(context.Context, bool) (int, error) {
+ return runtime.NumCPU(), nil
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/file_linux.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/file_linux.go
new file mode 100644
index 0000000000..c4bba23679
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/file_linux.go
@@ -0,0 +1,55 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// CountProcessesFileDescriptors returns the sum of open file descriptors for all given PIDs.
+// Failed PIDs are silently skipped.
+// A boolean is returned to indicate whether all PIDs failed or not.
+func CountProcessesFileDescriptors(procPath string, pids []int) (uint64, bool) {
+ // Compute the number of open FDs
+ allErrors := true
+ var fdSum int
+ for _, pid := range pids {
+ fdsPerPid, err := CountProcessFileDescriptors(procPath, pid)
+ if err != nil {
+ log.Tracef("Unable to get number of FDs for pid: %d", pid)
+ } else {
+ allErrors = false
+ fdSum += fdsPerPid
+ }
+ }
+
+ return uint64(fdSum), allErrors
+}
+
+// CountProcessFileDescriptors gets the number of open file descriptors for a given pid
+func CountProcessFileDescriptors(procPath string, pid int) (int, error) {
+ // Open proc file descriptor dir
+ fdPath := filepath.Join(procPath, strconv.Itoa(pid), "fd")
+ d, err := os.Open(fdPath)
+ if err != nil {
+ return 0, err
+ }
+ defer d.Close()
+
+ // Get all file names
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return 0, err
+ }
+
+ return len(names), nil
+}
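For illustration, a minimal usage sketch of the FD-count helpers above (Linux only; the /proc path and PIDs are hypothetical example values):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/system"
)

func main() {
	pids := []int{1, 4242} // hypothetical PIDs obtained elsewhere
	total, allFailed := system.CountProcessesFileDescriptors("/proc", pids)
	if allFailed {
		fmt.Println("could not read fd counts for any of the given PIDs")
		return
	}
	fmt.Printf("open file descriptors across PIDs: %d\n", total)
}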
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/namespace_linux.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/namespace_linux.go
new file mode 100644
index 0000000000..2df423901b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/namespace_linux.go
@@ -0,0 +1,62 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+ "sync"
+ "syscall"
+
+ "github.com/DataDog/datadog-agent/pkg/util/pointer"
+)
+
+// From https://github.com/torvalds/linux/blob/5859a2b1991101d6b978f3feb5325dad39421f29/include/linux/proc_ns.h#L41-L49
+// Currently, host namespace inode numbers are hardcoded, which can be used to detect
+// whether we're running in the host namespace or not (does not work when running in DinD)
+const (
+ hostUTSNamespecInode = 0xEFFFFFFE
+)
+
+var (
+ netNSPid1 uint64
+ syncNetNSPid1 sync.Once
+)
+
+// GetProcessNamespaceInode performs a stat() call on /proc/<pid>/ns/<namespace>
+func GetProcessNamespaceInode(procPath string, pid string, namespace string) (uint64, error) {
+ nsPath := filepath.Join(procPath, pid, "ns", namespace)
+ fi, err := os.Stat(nsPath)
+ if err != nil {
+ return 0, err
+ }
+
+ // We are on Linux, so this cast is safe
+ return fi.Sys().(*syscall.Stat_t).Ino, nil
+}
+
+// IsProcessHostNetwork compares namespaceID (the inode behind /proc/<pid>/ns/net returned by GetProcessNamespaceInode)
+// to the PID 1 namespace inode, which we assume belongs to the host network namespace
+func IsProcessHostNetwork(procPath string, namespaceID uint64) *bool {
+ syncNetNSPid1.Do(func() {
+ netNSPid1, _ = GetProcessNamespaceInode(procPath, "1", "net")
+ })
+
+ if netNSPid1 == 0 {
+ return nil
+ }
+
+ res := netNSPid1 == namespaceID
+ return &res
+}
+
+// IsProcessHostUTSNamespace compares namespaceID with the known, hardcoded host UTS namespace inode.
+// Keeps the same signature as `IsProcessHostNetwork` as we may need to change the implementation depending on kernel evolution
+func IsProcessHostUTSNamespace(procPath string, namespaceID uint64) *bool { //nolint:revive // TODO fix revive unused-parameter
+ return pointer.Ptr(namespaceID == hostUTSNamespecInode)
+}
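A minimal sketch of how the namespace helpers above compose (Linux only; the PID string is a hypothetical example):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/system"
)

func main() {
	// Resolve the network namespace inode of a hypothetical process.
	nsInode, err := system.GetProcessNamespaceInode("/proc", "1234", "net")
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	// A nil result means PID 1's namespace could not be resolved.
	if hostNet := system.IsProcessHostNetwork("/proc", nsInode); hostNet != nil {
		fmt.Println("process shares the host network namespace:", *hostNet)
	}
}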
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network.go
new file mode 100644
index 0000000000..3b028b29a7
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network.go
@@ -0,0 +1,43 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package system
+
+import (
+ "fmt"
+ "net"
+)
+
+// NetworkRoute holds one network destination subnet and its linked interface name
+type NetworkRoute struct {
+ Interface string
+ Subnet uint64
+ Gateway uint64
+ Mask uint64
+}
+
+// IsLocalAddress returns the given address if it is local or an error if it is not
+func IsLocalAddress(address string) (string, error) {
+ if address == "localhost" {
+ return address, nil
+ }
+ ip := net.ParseIP(address)
+ if ip == nil {
+ return "", fmt.Errorf("address was set to an invalid IP address: %s", address)
+ }
+ for _, cidr := range []string{
+ "127.0.0.0/8", // IPv4 loopback
+ "::1/128", // IPv6 loopback
+ } {
+ _, block, err := net.ParseCIDR(cidr)
+ if err != nil {
+ return "", err
+ }
+ if block.Contains(ip) {
+ return address, nil
+ }
+ }
+ return "", fmt.Errorf("address was set to a non-loopback IP address: %s", address)
+}
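For reference, a small sketch of the loopback check above; the candidate addresses are arbitrary examples:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/system"
)

func main() {
	for _, candidate := range []string{"localhost", "127.0.0.1", "::1", "10.0.0.5"} {
		if addr, err := system.IsLocalAddress(candidate); err == nil {
			fmt.Println("accepted local address:", addr)
		} else {
			fmt.Println("rejected:", err)
		}
	}
}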
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network_linux.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network_linux.go
new file mode 100644
index 0000000000..b6a05ec677
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network_linux.go
@@ -0,0 +1,181 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build linux
+
+package system
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/DataDog/datadog-agent/pkg/util/filesystem"
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// ParseProcessRoutes parses /proc/<pid>/net/route into a list of NetworkRoute
+// If PID is 0, it parses /proc/net/route instead
+func ParseProcessRoutes(procPath string, pid int) ([]NetworkRoute, error) {
+ var procNetFile string
+ if pid > 0 {
+ procNetFile = filepath.Join(procPath, strconv.Itoa(pid), "net", "route")
+ } else {
+ procNetFile = filepath.Join(procPath, "net", "route")
+ }
+
+ lines, err := filesystem.ReadLines(procNetFile)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read file at: %s, err: %w", procNetFile, err)
+ }
+ if len(lines) < 1 {
+ return nil, fmt.Errorf("empty network file %s", procNetFile)
+ }
+
+ routes := make([]NetworkRoute, 0, len(lines)-1)
+ for _, line := range lines[1:] {
+ fields := strings.Fields(line)
+ if len(fields) < 8 {
+ continue
+ }
+ dest, err := strconv.ParseUint(fields[1], 16, 32)
+ if err != nil {
+ log.Debugf("Cannot parse destination %q: %v", fields[1], err)
+ continue
+ }
+ gateway, err := strconv.ParseUint(fields[2], 16, 32)
+ if err != nil {
+ log.Debugf("Cannot parse gateway %q: %v", fields[2], err)
+ continue
+ }
+ mask, err := strconv.ParseUint(fields[7], 16, 32)
+ if err != nil {
+ log.Debugf("Cannot parse mask %q: %v", fields[7], err)
+ continue
+ }
+ d := NetworkRoute{
+ Interface: fields[0],
+ Subnet: dest,
+ Gateway: gateway,
+ Mask: mask,
+ }
+ routes = append(routes, d)
+ }
+ return routes, nil
+}
+
+// GetDefaultGateway parses /proc/net/route and extracts
+// the first route with Destination == "00000000"
+func GetDefaultGateway(procPath string) (net.IP, error) {
+ routes, err := ParseProcessRoutes(procPath, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, route := range routes {
+ if route.Subnet == 0 {
+ ip := make(net.IP, 4)
+ binary.LittleEndian.PutUint32(ip, uint32(route.Gateway))
+ return ip, nil
+ }
+ }
+
+ return nil, errors.New("no default route found")
+}
+
+// ParseProcessIPs parses /proc/<pid>/net/fib_trie and returns the /32 IP
+// addresses found. The result does not contain duplicate IPs.
+//
+// Here's an example of /proc/<pid>/net/fib_trie that shows its format:
+//
+// Main:
+// +-- 0.0.0.0/1 2 0 2
+// +-- 0.0.0.0/4 2 0 2
+// |-- 0.0.0.0
+// /0 universe UNICAST
+// +-- 10.4.0.0/24 2 1 2
+// |-- 10.4.0.0
+// /32 link BROADCAST
+// /24 link UNICAST
+// +-- 10.4.0.192/26 2 0 2
+// |-- 10.4.0.216
+// /32 host LOCAL
+// |-- 10.4.0.255
+// /32 link BROADCAST
+// +-- 127.0.0.0/8 2 0 2
+// +-- 127.0.0.0/31 1 0 0
+// |-- 127.0.0.0
+// /32 link BROADCAST
+// /8 host LOCAL
+// |-- 127.0.0.1
+// /32 host LOCAL
+// |-- 127.255.255.255
+// /32 link BROADCAST
+// Local:
+// +-- 0.0.0.0/1 2 0 2
+// +-- 0.0.0.0/4 2 0 2
+// |-- 0.0.0.0
+// /0 universe UNICAST
+// +-- 10.4.0.0/24 2 1 2
+// |-- 10.4.0.0
+// /32 link BROADCAST
+// /24 link UNICAST
+// +-- 10.4.0.192/26 2 0 2
+// |-- 10.4.0.216
+// /32 host LOCAL
+// |-- 10.4.0.255
+// /32 link BROADCAST
+// +-- 127.0.0.0/8 2 0 2
+// +-- 127.0.0.0/31 1 0 0
+// |-- 127.0.0.0
+// /32 link BROADCAST
+// /8 host LOCAL
+// |-- 127.0.0.1
+// /32 host LOCAL
+// |-- 127.255.255.255
+// /32 link BROADCAST
+//
+// The IPs that we're interested in are the ones that appear above lines that
+// contain "/32 host".
+func ParseProcessIPs(procPath string, pid int, filterFunc func(string) bool) ([]string, error) {
+ var procNetFibTrieFile string
+ if pid > 0 {
+ procNetFibTrieFile = filepath.Join(procPath, strconv.Itoa(pid), "net", "fib_trie")
+ } else {
+ procNetFibTrieFile = filepath.Join(procPath, "net", "fib_trie")
+ }
+
+ lines, err := filesystem.ReadLines(procNetFibTrieFile)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read file at: %s, err: %w", procNetFibTrieFile, err)
+ }
+ if len(lines) < 1 {
+ return nil, fmt.Errorf("empty network file %s", procNetFibTrieFile)
+ }
+
+ IPs := make(map[string]bool)
+ for i, line := range lines {
+ if strings.Contains(line, "/32 host") && i > 0 {
+ split := strings.Split(lines[i-1], "|-- ")
+ if len(split) == 2 {
+ ip := split[1]
+ if filterFunc == nil || filterFunc(ip) {
+ IPs[ip] = true
+ }
+ }
+ }
+ }
+
+ var uniqueIPs []string
+ for IP := range IPs {
+ uniqueIPs = append(uniqueIPs, IP)
+ }
+
+ return uniqueIPs, nil
+}
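A minimal sketch combining the route and fib_trie parsers above (Linux only; PID 0 reads the host-wide files, and the loopback filter is an example predicate):

package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/util/system"
)

func main() {
	if gw, err := system.GetDefaultGateway("/proc"); err == nil {
		fmt.Println("default gateway:", gw)
	}

	// Collect the host's /32 addresses from /proc/net/fib_trie,
	// dropping loopback entries with a caller-supplied filter.
	ips, err := system.ParseProcessIPs("/proc", 0, func(ip string) bool {
		return !strings.HasPrefix(ip, "127.")
	})
	if err == nil {
		fmt.Println("non-loopback IPs:", ips)
	}
}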
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network_stub.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network_stub.go
new file mode 100644
index 0000000000..790a90b85c
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network_stub.go
@@ -0,0 +1,26 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !linux && !windows
+
+package system
+
+import "net"
+
+// ParseProcessRoutes is just a stub for platforms where that's currently not
+// defined (like macOS). This allows code that refers to this (like the docker
+// check) to at least compile in those platforms, and that's useful for things
+// like running unit tests.
+func ParseProcessRoutes(procPath string, pid int) ([]NetworkRoute, error) { //nolint:revive // TODO fix revive unused-parameter
+ panic("ParseProcessRoutes is not implemented in this environment")
+}
+
+// GetDefaultGateway is just a stub for platforms where that's currently not
+// defined (like macOS). This allows code that refers to this (like the cluster
+// agent) to at least compile in those platforms, and that's useful for things
+// like running unit tests.
+func GetDefaultGateway(procPath string) (net.IP, error) { //nolint:revive // TODO fix revive unused-parameter
+ panic("GetDefaultGateway is not implemented in this environment")
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network_windows.go
new file mode 100644
index 0000000000..3fbb7def69
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/network_windows.go
@@ -0,0 +1,114 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+package system
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "net"
+ "os/exec"
+ "strings"
+ "syscall"
+
+ "github.com/DataDog/datadog-agent/pkg/util/winutil/iphelper"
+
+ "golang.org/x/sys/windows"
+)
+
+// ParseProcessRoutes uses the Windows IPv4 routing table
+func ParseProcessRoutes(procPath string, pid int) ([]NetworkRoute, error) { //nolint:revive // TODO fix revive unused-parameter
+ // TODO: Filter by PID
+ routingTable, err := iphelper.GetIPv4RouteTable()
+ if err != nil {
+ return nil, err
+ }
+ interfaceTable, err := iphelper.GetIFTable()
+ if err != nil {
+ return nil, err
+ }
+ netDestinations := make([]NetworkRoute, 0, len(routingTable))
+ for _, row := range routingTable {
+ itf := interfaceTable[row.DwForwardIfIndex]
+ netDest := NetworkRoute{
+ Interface: windows.UTF16ToString(itf.Name[:]),
+ Subnet: uint64(row.DwForwardDest),
+ Mask: uint64(row.DwForwardMask),
+ Gateway: uint64(row.DwForwardNextHop),
+ }
+ netDestinations = append(netDestinations, netDest)
+ }
+ return netDestinations, nil
+}
+
+// GetDefaultGateway returns the default gateway used by the container implementation
+func GetDefaultGateway(procPath string) (net.IP, error) { //nolint:revive // TODO fix revive unused-parameter
+ fields, err := defaultGatewayFields()
+ if err != nil {
+ return nil, err
+ }
+ return net.ParseIP(fields[2]), nil
+}
+
+// Output from route print 0.0.0.0:
+//
+// λ route print 0.0.0.0
+// ===========================================================================
+// Interface List
+// 17...00 1c 42 86 10 92 ......Intel(R) 82574L Gigabit Network Connection
+// 16...bc 9a 78 56 34 12 ......Bluetooth Device (Personal Area Network)
+//
+// 1...........................Software Loopback Interface 1
+//
+// 24...00 15 5d 2c 6f c0 ......Hyper-V Virtual Ethernet Adapter #2
+// ===========================================================================
+//
+// IPv4 Route Table
+// ===========================================================================
+// Active Routes:
+// Network Destination Netmask Gateway Interface Metric
+//
+// 0.0.0.0 0.0.0.0 10.211.55.1 10.211.55.4 25
+//
+// ===========================================================================
+// Persistent Routes:
+//
+// Network Address Netmask Gateway Address Metric
+// 0.0.0.0 0.0.0.0 172.21.96.1 Default
+//
+// ===========================================================================
+//
+// IPv6 Route Table
+// ===========================================================================
+// Active Routes:
+//
+// None
+//
+// Persistent Routes:
+//
+// None
+//
+// We are interested in the Gateway and Interface fields of the Active Routes,
+// so this method returns the first line that has 5 fields with the first one being
+// 0.0.0.0
+func defaultGatewayFields() ([]string, error) {
+ routeCmd := exec.Command("route", "print", "0.0.0.0")
+ routeCmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
+ output, err := routeCmd.CombinedOutput()
+ if err != nil {
+ return nil, err
+ }
+ scanner := bufio.NewScanner(bytes.NewReader(output))
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) == 5 && fields[0] == "0.0.0.0" {
+ return fields, nil
+ }
+ }
+ return nil, fmt.Errorf("couldn't retrieve default gateway information")
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/socket/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/socket/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/socket/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/socket/socket.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/socket/socket.go
new file mode 100644
index 0000000000..e9cff69acd
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/socket/socket.go
@@ -0,0 +1,56 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build !windows
+
+// Package socket provides methods to check whether a socket path is available.
+package socket
+
+import (
+ "errors"
+ "net"
+ "os"
+ "time"
+)
+
+// IsAvailable reports whether a socket at path is available:
+// the first boolean reports whether the socket path exists,
+// the second whether the socket is reachable.
+func IsAvailable(path string, timeout time.Duration) (bool, bool) {
+ if !checkExists(path) {
+ return false, false
+ }
+
+ // Assuming socket file exists (bind() done)
+ // -> but we don't have permission: permission denied
+ // -> but no process associated to socket anymore: connection refused
+ // -> but process did not call listen(): connection refused
+ // -> but process does not call accept(): no error
+ // We'll consider socket available in all cases except if permission is denied
+ // as if a path exists and we do have access, it's likely that a process will re-use it later.
+ conn, err := net.DialTimeout("unix", path, timeout)
+ if err != nil && errors.Is(err, os.ErrPermission) {
+ return true, false
+ }
+
+ if conn != nil {
+ conn.Close()
+ }
+
+ return true, true
+}
+
+func checkExists(path string) bool {
+ f, err := os.Stat(path)
+ if err != nil {
+ return false
+ }
+
+ if f.Mode()&os.ModeSocket != 0 {
+ return true
+ }
+
+ return false
+}
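A minimal sketch of the availability check above (non-Windows; the socket path is a hypothetical example):

package main

import (
	"fmt"
	"time"

	"github.com/DataDog/datadog-agent/pkg/util/system/socket"
)

func main() {
	exists, reachable := socket.IsAvailable("/var/run/docker.sock", 500*time.Millisecond)
	fmt.Printf("socket exists=%v reachable=%v\n", exists, reachable)
}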
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/system/socket/socket_windows.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/socket/socket_windows.go
new file mode 100644
index 0000000000..447c3961db
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/system/socket/socket_windows.go
@@ -0,0 +1,39 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package socket provides methods to check whether a socket path is available.
+package socket
+
+import (
+ "os"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+)
+
+// IsAvailable reports named pipe availability,
+// as Unix sockets do not exist on Windows
+func IsAvailable(path string, timeout time.Duration) (bool, bool) {
+ if !checkExists(path) {
+ return false, false
+ }
+
+ conn, err := winio.DialPipe(path, &timeout)
+ if err != nil {
+ return true, false
+ }
+
+ if conn != nil {
+ conn.Close()
+ }
+
+ return true, true
+}
+
+func checkExists(path string) bool {
+ // On Windows there's no easy way to check if a path is a named pipe
+ _, err := os.Stat(path)
+ return err == nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/doc.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/doc.go
new file mode 100644
index 0000000000..c8dcb386d5
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/doc.go
@@ -0,0 +1,8 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2018-present Datadog, Inc.
+//go:build !windows
+
+// Package winutil - Windows utility functions
+package winutil
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/elevated.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/elevated.go
new file mode 100644
index 0000000000..2973ef62f3
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/elevated.go
@@ -0,0 +1,44 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2018-present Datadog, Inc.
+//go:build windows
+
+package winutil
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsProcessElevated opens the process token and checks elevation status,
+// returning true if the process is elevated and false if not elevated.
+func IsProcessElevated() (bool, error) {
+ p, e := syscall.GetCurrentProcess()
+ if e != nil {
+ return false, e
+ }
+ var t syscall.Token
+ e = syscall.OpenProcessToken(p, syscall.TOKEN_QUERY, &t)
+ if e != nil {
+ return false, e
+ }
+ defer syscall.CloseHandle(syscall.Handle(t))
+
+ var elevated uint32
+ n := uint32(unsafe.Sizeof(elevated))
+ for {
+ b := make([]byte, n)
+ e := syscall.GetTokenInformation(t, syscall.TokenElevation, &b[0], uint32(len(b)), &n)
+ if e == nil {
+ elevated = *(*uint32)(unsafe.Pointer(&b[0]))
+ return elevated != 0, nil
+ }
+ if e != syscall.ERROR_INSUFFICIENT_BUFFER {
+ return false, e
+ }
+ if n <= uint32(len(b)) {
+ return false, e
+ }
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog.go
new file mode 100644
index 0000000000..fb1de7ba4b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog.go
@@ -0,0 +1,38 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+//go:build windows
+
+package winutil
+
+import (
+ pkglog "github.com/DataDog/datadog-agent/pkg/util/log"
+ "golang.org/x/sys/windows/svc/eventlog"
+)
+
+// LogEventViewer opens the event log and writes a single message to it.
+// The message identified by the msgnum parameter must exist in the
+// application's message catalog. The Go eventlog API only allows a single
+// string argument to be passed, so only one positional argument can be included.
+func LogEventViewer(servicename string, msgnum uint32, arg string) {
+ elog, err := eventlog.Open(servicename)
+ if err != nil {
+ pkglog.Errorf("error opening event log with source %v: %v", servicename, err)
+ return
+ }
+ defer elog.Close()
+ switch msgnum & 0xF0000000 {
+ case 0x40000000:
+ // Info level message
+ _ = elog.Info(msgnum, arg)
+ case 0x80000000:
+ // warning level message
+ _ = elog.Warning(msgnum, arg)
+ case 0xC0000000:
+ // error level message
+ _ = elog.Error(msgnum, arg)
+ }
+
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/iisconfig.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/iisconfig.go
new file mode 100644
index 0000000000..be6eb091ba
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/iisconfig.go
@@ -0,0 +1,171 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+//go:build windows
+
+package winutil
+
+import (
+ "encoding/xml"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+ "github.com/fsnotify/fsnotify"
+)
+
+var (
+ // make global so that we can override for tests.
+ iisCfgPath = filepath.Join(os.Getenv("windir"), "System32", "inetsrv", "config", "applicationHost.config")
+)
+
+// DynamicIISConfig is an object that will watch the IIS configuration for
+// changes, and reload the configuration when it changes. It provides additional
+// methods for getting specific configuration items
+type DynamicIISConfig struct {
+ watcher *fsnotify.Watcher
+ path string
+ wg sync.WaitGroup
+ mux sync.Mutex
+ stopChannel chan bool
+ xmlcfg *iisConfiguration
+ siteIDToName map[uint32]string
+}
+
+// NewDynamicIISConfig creates a new DynamicIISConfig
+func NewDynamicIISConfig() (*DynamicIISConfig, error) {
+ iiscfg := &DynamicIISConfig{
+ stopChannel: make(chan bool),
+ }
+ var err error
+
+ iiscfg.watcher, err = fsnotify.NewWatcher()
+ if err != nil {
+ return nil, err
+ }
+
+ // check for existence
+ _, err = os.Stat(iisCfgPath)
+ if os.IsNotExist(err) {
+ return nil, err
+ } else if err != nil {
+ return nil, err
+ }
+ iiscfg.path = iisCfgPath
+ return iiscfg, nil
+}
+
+// Start config watcher
+func (iiscfg *DynamicIISConfig) Start() error {
+ if iiscfg == nil {
+ return fmt.Errorf("Null config")
+ }
+ // set the filepath
+ err := iiscfg.watcher.Add(iiscfg.path)
+ if err != nil {
+ return err
+ }
+ err = iiscfg.readXMLConfig()
+ if err != nil {
+ return err
+ }
+ iiscfg.wg.Add(1)
+ go func() {
+ defer iiscfg.wg.Done()
+ for {
+ select {
+ case event := <-iiscfg.watcher.Events:
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ _ = iiscfg.readXMLConfig()
+ }
+ case err = <-iiscfg.watcher.Errors:
+ return
+ case <-iiscfg.stopChannel:
+ return
+ }
+ }
+
+ }()
+ return nil
+}
+
+// Stop config watcher
+func (iiscfg *DynamicIISConfig) Stop() {
+ iiscfg.stopChannel <- true
+ iiscfg.wg.Wait()
+}
+
+type iisVirtualDirectory struct {
+ Path string `xml:"path,attr"`
+ PhysicalPath string `xml:"physicalPath,attr"`
+}
+type iisBinding struct {
+ Protocol string `xml:"protocol,attr"`
+ BindingInfo string `xml:"bindingInformation,attr"`
+}
+type iisApplication struct {
+ XMLName xml.Name `xml:"application"`
+ Path string `xml:"path,attr"`
+ AppPool string `xml:"applicationPool,attr"`
+ VirtualDirs []iisVirtualDirectory `xml:"virtualDirectory"`
+}
+type iisSite struct {
+ Name string `xml:"name,attr"`
+ SiteID string `xml:"id,attr"`
+ Application iisApplication
+ Bindings []iisBinding `xml:"bindings>binding"`
+}
+type iisSystemApplicationHost struct {
+ XMLName xml.Name `xml:"system.applicationHost"`
+ Sites []iisSite `xml:"sites>site"`
+}
+type iisConfiguration struct {
+ XMLName xml.Name `xml:"configuration"`
+ ApplicationHost iisSystemApplicationHost
+}
+
+func (iiscfg *DynamicIISConfig) readXMLConfig() error {
+ var newcfg iisConfiguration
+ f, err := os.ReadFile(iiscfg.path)
+ if err != nil {
+ return err
+ }
+ err = xml.Unmarshal(f, &newcfg)
+ if err != nil {
+ return err
+ }
+ idmap := make(map[uint32]string)
+
+ for _, site := range newcfg.ApplicationHost.Sites {
+ id, err := strconv.Atoi(site.SiteID)
+ if err != nil {
+ return err
+ }
+ idmap[uint32(id)] = site.Name
+ }
+ iiscfg.mux.Lock()
+ defer iiscfg.mux.Unlock()
+ iiscfg.xmlcfg = &newcfg
+ iiscfg.siteIDToName = idmap
+ return nil
+}
+
+// GetSiteNameFromID looks up a site name by its site ID
+func (iiscfg *DynamicIISConfig) GetSiteNameFromID(id uint32) string {
+ if iiscfg == nil {
+ log.Warnf("GetSiteNameFromId %d NIL", id)
+ return ""
+ }
+ var val string
+ var ok bool
+ iiscfg.mux.Lock()
+ defer iiscfg.mux.Unlock()
+ if val, ok = iiscfg.siteIDToName[id]; !ok {
+ return ""
+ }
+ return val
+}
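A minimal sketch of the watcher lifecycle above (Windows only; the site ID is a hypothetical example):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/winutil"
)

func main() {
	cfg, err := winutil.NewDynamicIISConfig()
	if err != nil {
		fmt.Println("applicationHost.config not available:", err)
		return
	}
	if err := cfg.Start(); err != nil { // parses the config and starts watching for rewrites
		fmt.Println("watcher failed:", err)
		return
	}
	defer cfg.Stop()

	fmt.Println("site 1 is named:", cfg.GetSiteNameFromID(1)) // empty string if unknown
}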
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/iphelper/adapters.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/iphelper/adapters.go
new file mode 100644
index 0000000000..d5b5c7c382
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/iphelper/adapters.go
@@ -0,0 +1,139 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2018-present Datadog, Inc.
+//go:build windows
+
+package iphelper
+
+import (
+ "C"
+
+ "fmt"
+ "net"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ procGetAdaptersAddresses = modiphelper.NewProc("GetAdaptersAddresses")
+)
+
+// IPAdapterUnicastAddress is a Go approximation of IP_ADAPTER_UNICAST_ADDRESS_LH
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/iptypes/ns-iptypes-ip_adapter_unicast_address_lh
+type IPAdapterUnicastAddress struct {
+ Flags uint32
+ Address net.IP
+}
+
+type sockaddr struct {
+ family int16
+ port uint16
+ // if it's ipv4, the address is the first 4 bytes
+ // if it's ipv6, the address is bytes 4->20
+ addressBase uintptr
+}
+type socketAddress struct {
+ lpSockaddr *sockaddr
+ iSockaddrLength int32
+}
+type ipAdapterUnicastAddress struct {
+ length uint32
+ flags uint32
+ next *ipAdapterUnicastAddress
+ address socketAddress
+}
+
+// IPAdapterAddressesLh is a Go adaptation of the C structure IP_ADAPTER_ADDRESSES_LH.
+// It is an adaptation, rather than a matching structure, because the real structure
+// is difficult to approximate in Go.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/iptypes/ns-iptypes-ip_adapter_addresses_lh
+type IPAdapterAddressesLh struct {
+ Index uint32
+ AdapterName string
+ UnicastAddresses []IPAdapterUnicastAddress
+}
+
+type ipAdapterAddresses struct {
+ length uint32
+ ifIndex uint32
+ next *ipAdapterAddresses
+ adapterName unsafe.Pointer // pointer to character buffer
+ firstUnicastAddress *ipAdapterUnicastAddress
+}
+
+// GetAdaptersAddresses returns a map of all of the adapters, indexed by
+// interface index
+func GetAdaptersAddresses() (table map[uint32]IPAdapterAddressesLh, err error) {
+ size := uint32(15 * 1024)
+ rawbuf := make([]byte, size)
+
+ r, _, _ := procGetAdaptersAddresses.Call(uintptr(syscall.AF_INET),
+ uintptr(0), // flags == 0 for now
+ uintptr(0), // reserved, always zero
+ uintptr(unsafe.Pointer(&rawbuf[0])),
+ uintptr(unsafe.Pointer(&size)))
+
+ if r != 0 {
+ if r != uintptr(windows.ERROR_BUFFER_OVERFLOW) {
+ err = fmt.Errorf("Error getting address list %v", r)
+ return
+ }
+ rawbuf = make([]byte, size)
+ r, _, _ := procGetAdaptersAddresses.Call(uintptr(syscall.AF_INET),
+ uintptr(0), // flags == 0 for now
+ uintptr(0), // reserved, always zero
+ uintptr(unsafe.Pointer(&rawbuf[0])),
+ uintptr(unsafe.Pointer(&size)))
+ if r != 0 {
+ err = fmt.Errorf("Error getting address list %v", r)
+ return
+ }
+ }
+ // We need to walk the list, which is a C-style linked list where the `Next`
+ // pointer is not the first element. The C structure is as follows:
+ /*
+ typedef struct _IP_ADAPTER_ADDRESSES_LH {
+ union {
+ ULONGLONG Alignment;
+ struct {
+ ULONG Length;
+ IF_INDEX IfIndex;
+ };
+ };
+ struct _IP_ADAPTER_ADDRESSES_LH *Next;
+ PCHAR AdapterName;
+ PIP_ADAPTER_UNICAST_ADDRESS_LH FirstUnicastAddress;
+ // more fields follow which we're not using
+ */
+ var addr *ipAdapterAddresses
+ table = make(map[uint32]IPAdapterAddressesLh)
+ addr = (*ipAdapterAddresses)(unsafe.Pointer(&rawbuf[0]))
+ for addr != nil {
+ var entry IPAdapterAddressesLh
+ entry.Index = addr.ifIndex
+ entry.AdapterName = C.GoString((*C.char)(addr.adapterName))
+
+ unicast := addr.firstUnicastAddress
+ for unicast != nil {
+ if unicast.address.lpSockaddr.family == syscall.AF_INET {
+ // ipv4 address
+ var uni IPAdapterUnicastAddress
+ uni.Address = (*[1 << 29]byte)(unsafe.Pointer(unicast.address.lpSockaddr))[4:8:8]
+ entry.UnicastAddresses = append(entry.UnicastAddresses, uni)
+ } else if unicast.address.lpSockaddr.family == syscall.AF_INET6 {
+ var uni IPAdapterUnicastAddress
+ uni.Address = (*[1 << 29]byte)(unsafe.Pointer(&(unicast.address.lpSockaddr.addressBase)))[:16:16]
+ entry.UnicastAddresses = append(entry.UnicastAddresses, uni)
+ }
+ unicast = unicast.next
+ }
+ table[entry.Index] = entry
+ addr = addr.next
+ }
+ return
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/iphelper/routes.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/iphelper/routes.go
new file mode 100644
index 0000000000..2de67ec872
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/iphelper/routes.go
@@ -0,0 +1,217 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2018-present Datadog, Inc.
+//go:build windows
+
+//nolint:revive // TODO(WINA) Fix revive linter
+package iphelper
+
+import (
+ "encoding/binary"
+ "fmt"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+//revive:disable:var-naming Name is intended to match the Windows API name
+var (
+ modiphelper = windows.NewLazyDLL("Iphlpapi.dll")
+
+ procGetExtendedTcpTable = modiphelper.NewProc("GetExtendedTcpTable")
+ procGetIpForwardTable = modiphelper.NewProc("GetIpForwardTable")
+ procGetIfTable = modiphelper.NewProc("GetIfTable")
+)
+
+//revive:enable:var-naming (API)
+
+//revive:disable:var-naming Name is intended to match the Windows type name
+
+// MIB_TCPROW_OWNER_PID is the matching structure for the IPHelper structure
+// of the same name. Fields documented
+// https://docs.microsoft.com/en-us/windows/win32/api/tcpmib/ns-tcpmib-mib_tcprow_owner_pid
+type MIB_TCPROW_OWNER_PID struct {
+ /* C declaration
+ DWORD dwState;
+ DWORD dwLocalAddr;
+ DWORD dwLocalPort;
+ DWORD dwRemoteAddr;
+ DWORD dwRemotePort;
+ DWORD dwOwningPid; */
+ DwState uint32
+ DwLocalAddr uint32 // network byte order
+ DwLocalPort uint32 // network byte order
+ DwRemoteAddr uint32 // network byte order
+ DwRemotePort uint32 // network byte order
+ DwOwningPid uint32
+}
+
+// MIB_IPFORWARDROW is the matching structure for the IPHelper structure of
+// the same name; it defines a route entry
+// https://docs.microsoft.com/en-us/windows/win32/api/ipmib/ns-ipmib-mib_ipforwardrow
+type MIB_IPFORWARDROW struct {
+ DwForwardDest uint32 // destination IP address. 0.0.0.0 is default route
+ DwForwardMask uint32
+ DwForwardPolicy uint32
+ DwForwardNextHop uint32
+ DwForwardIfIndex uint32
+ DwForwardType uint32
+ DwForwardProto uint32
+ DwForwardAge uint32
+ DwForwardNextHopAS uint32
+ DwForwardMetric1 uint32
+ DwForwardMetric2 uint32
+ DwForwardMetric3 uint32
+ DwForwardMetric4 uint32
+ DwForwardMetric5 uint32
+}
+
+//revive:enable:var-naming (type)
+
+//revive:disable:var-naming Name is intended to match the Windows const name
+
+// TCP_TABLE_CLASS enum
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/iprtrmib/ne-iprtrmib-tcp_table_class
+const (
+ TCP_TABLE_BASIC_LISTENER = uint32(0)
+ TCP_TABLE_BASIC_CONNECTIONS = uint32(1)
+ TCP_TABLE_BASIC_ALL = uint32(2)
+ TCP_TABLE_OWNER_PID_LISTENER = uint32(3)
+ TCP_TABLE_OWNER_PID_CONNECTIONS = uint32(4)
+ TCP_TABLE_OWNER_PID_ALL = uint32(5)
+ TCP_TABLE_OWNER_MODULE_LISTENER = uint32(6)
+ TCP_TABLE_OWNER_MODULE_CONNECTIONS = uint32(7)
+ TCP_TABLE_OWNER_MODULE_ALL = uint32(8)
+)
+
+//revive:enable:var-naming (const)
+
+// GetIPv4RouteTable returns a list of the current ipv4 routes.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getipforwardtable
+func GetIPv4RouteTable() (table []MIB_IPFORWARDROW, err error) {
+ var size uint32
+ var rawtableentry uintptr
+ r, _, _ := procGetIpForwardTable.Call(rawtableentry,
+ uintptr(unsafe.Pointer(&size)),
+ uintptr(1)) // true, sorted
+
+ if r != uintptr(windows.ERROR_INSUFFICIENT_BUFFER) {
+ err = fmt.Errorf("Unexpected error %v", r)
+ return
+ }
+ rawbuf := make([]byte, size)
+ r, _, _ = procGetIpForwardTable.Call(uintptr(unsafe.Pointer(&rawbuf[0])),
+ uintptr(unsafe.Pointer(&size)),
+ uintptr(1)) // true, sorted
+ if r != 0 {
+ err = fmt.Errorf("Unexpected error %v", r)
+ return
+ }
+ count := uint32(binary.LittleEndian.Uint32(rawbuf))
+
+ entries := (*[1 << 24]MIB_IPFORWARDROW)(unsafe.Pointer(&rawbuf[4]))[:count:count]
+ table = append(table, entries...)
+ return table, nil
+
+}
+
+// GetExtendedTcpV4Table returns a list of ipv4 tcp connections indexed by owning PID
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getextendedtcptable
+//
+//revive:disable-next-line:var-naming Name is intended to match the Windows API name
+func GetExtendedTcpV4Table() (table map[uint32][]MIB_TCPROW_OWNER_PID, err error) {
+ var size uint32
+ var rawtableentry uintptr
+ r, _, _ := procGetExtendedTcpTable.Call(rawtableentry,
+ uintptr(unsafe.Pointer(&size)),
+ uintptr(0), // false, unsorted
+ uintptr(syscall.AF_INET),
+ uintptr(TCP_TABLE_OWNER_PID_ALL),
+ uintptr(0))
+
+ if r != uintptr(windows.ERROR_INSUFFICIENT_BUFFER) {
+ err = fmt.Errorf("Unexpected error %v", r)
+ return
+ }
+ rawbuf := make([]byte, size)
+ r, _, _ = procGetExtendedTcpTable.Call(uintptr(unsafe.Pointer(&rawbuf[0])),
+ uintptr(unsafe.Pointer(&size)),
+ uintptr(0), // false, unsorted
+ uintptr(syscall.AF_INET),
+ uintptr(TCP_TABLE_OWNER_PID_ALL),
+ uintptr(0))
+ if r != 0 {
+ err = fmt.Errorf("Unexpected error %v", r)
+ return
+ }
+ count := uint32(binary.LittleEndian.Uint32(rawbuf))
+ table = make(map[uint32][]MIB_TCPROW_OWNER_PID)
+
+ entries := (*[1 << 24]MIB_TCPROW_OWNER_PID)(unsafe.Pointer(&rawbuf[4]))[:count:count]
+ for _, entry := range entries {
+ pid := entry.DwOwningPid
+
+ table[pid] = append(table[pid], entry)
+
+ }
+ return table, nil
+
+}
+
+// GetIFTable returns a table of interfaces, indexed by the interface index
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getiftable
+func GetIFTable() (table map[uint32]windows.MibIfRow, err error) {
+ var size uint32
+ var rawtableentry uintptr
+ r, _, _ := procGetIfTable.Call(rawtableentry,
+ uintptr(unsafe.Pointer(&size)),
+ uintptr(0)) // false, unsorted
+
+ if r != uintptr(windows.ERROR_INSUFFICIENT_BUFFER) {
+ err = fmt.Errorf("Unexpected error %v", r)
+ return
+ }
+ rawbuf := make([]byte, size)
+ r, _, _ = procGetIfTable.Call(uintptr(unsafe.Pointer(&rawbuf[0])),
+ uintptr(unsafe.Pointer(&size)),
+ uintptr(0)) // false, unsorted
+ if r != 0 {
+ err = fmt.Errorf("Unexpected error %v", r)
+ return
+ }
+ count := uint32(binary.LittleEndian.Uint32(rawbuf))
+ table = make(map[uint32]windows.MibIfRow)
+
+ entries := (*[1 << 20]windows.MibIfRow)(unsafe.Pointer(&rawbuf[4]))[:count:count]
+ for _, entry := range entries {
+ idx := entry.Index
+
+ table[idx] = entry
+
+ }
+ return table, nil
+
+}
+
+// Ntohs converts a network byte order 16 bit int to host byte order
+func Ntohs(i uint16) uint16 {
+ return binary.BigEndian.Uint16((*(*[2]byte)(unsafe.Pointer(&i)))[:])
+}
+
+// Ntohl converts a network byte order 32 bit int to host byte order
+func Ntohl(i uint32) uint32 {
+ return binary.BigEndian.Uint32((*(*[4]byte)(unsafe.Pointer(&i)))[:])
+}
+
+// Htonl converts a host byte order 32 bit int to network byte order
+func Htonl(i uint32) uint32 {
+ b := make([]byte, 4)
+ binary.BigEndian.PutUint32(b, i)
+ return *(*uint32)(unsafe.Pointer(&b[0]))
+}
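+
+// exampleDumpTCPConnections is an illustrative sketch only; it is not part of
+// the upstream Datadog file. It shows how GetExtendedTcpV4Table and Ntohs
+// might be combined to print the per-PID connection table, assuming the port
+// values live in the low 16 bits of the network-byte-order DWORD fields.
+func exampleDumpTCPConnections() error {
+	table, err := GetExtendedTcpV4Table()
+	if err != nil {
+		return err
+	}
+	for pid, rows := range table {
+		for _, row := range rows {
+			fmt.Printf("pid %d: local port %d, remote port %d\n",
+				pid, Ntohs(uint16(row.DwLocalPort)), Ntohs(uint16(row.DwRemotePort)))
+		}
+	}
+	return nil
+}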
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/process.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/process.go
new file mode 100644
index 0000000000..58bd218b46
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/process.go
@@ -0,0 +1,401 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+package winutil
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+var (
+ modntdll = windows.NewLazyDLL("ntdll.dll")
+ modkernel = windows.NewLazyDLL("kernel32.dll")
+ procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess")
+ procReadProcessMemory = modkernel.NewProc("ReadProcessMemory")
+ procIsWow64Process = modkernel.NewProc("IsWow64Process")
+ procQueryFullProcessImageNameW = modkernel.NewProc("QueryFullProcessImageNameW")
+)
+
+// C definition from winternl.h
+
+//typedef enum _PROCESSINFOCLASS {
+// ProcessBasicInformation = 0,
+// ProcessDebugPort = 7,
+// ProcessWow64Information = 26,
+// ProcessImageFileName = 27,
+// ProcessBreakOnTermination = 29
+//} PROCESSINFOCLASS;
+
+// PROCESSINFOCLASS is the Go representation of the above enum
+type PROCESSINFOCLASS uint32
+
+const (
+ // ProcessBasicInformation returns the PEB type
+ ProcessBasicInformation = PROCESSINFOCLASS(0)
+ // ProcessDebugPort included for completeness
+ ProcessDebugPort = PROCESSINFOCLASS(7)
+ // ProcessWow64Information included for completeness
+ ProcessWow64Information = PROCESSINFOCLASS(26)
+ // ProcessImageFileName included for completeness
+ ProcessImageFileName = PROCESSINFOCLASS(27)
+ // ProcessBreakOnTermination included for completeness
+ ProcessBreakOnTermination = PROCESSINFOCLASS(29)
+)
+
+// IsWow64Process determines if the specified process is running under WOW64
+// that is, if it's a 32 bit process running on 64 bit Windows
+func IsWow64Process(h windows.Handle) (is32bit bool, err error) {
+ var wow64Process uint32
+
+ r, _, _ := procIsWow64Process.Call(uintptr(h),
+ uintptr(unsafe.Pointer(&wow64Process)))
+
+ if r == 0 {
+ return false, windows.GetLastError()
+ }
+ if wow64Process == 0 {
+ is32bit = false
+ } else {
+ is32bit = true
+ }
+ return
+}
+
+// NtQueryInformationProcess wraps the Windows NT kernel call of the same name
+func NtQueryInformationProcess(h windows.Handle, class PROCESSINFOCLASS, target, size uintptr) (err error) {
+ r, _, _ := procNtQueryInformationProcess.Call(uintptr(h),
+ uintptr(class),
+ target,
+ size,
+ uintptr(0))
+ if r != 0 {
+ err = windows.GetLastError()
+ return
+ }
+ return
+}
+
+// ReadProcessMemory wraps the Windows kernel.dll function of the same name
+// https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-readprocessmemory
+func ReadProcessMemory(h windows.Handle, from, to uintptr, count uint32) (bytesRead uint64, err error) {
+ var bytes uint64
+
+ r, _, e := procReadProcessMemory.Call(uintptr(h),
+ from,
+ to,
+ uintptr(count),
+ uintptr(unsafe.Pointer(&bytes)))
+
+ if r == 0 {
+ if e == windows.ERROR_ACCESS_DENIED {
+ log.Debugf("Access denied error getting process memory")
+ } else {
+ log.Warnf("Unexpected error getting process memory for handle (h) %v (err) %v", h, e)
+ }
+ return 0, e
+ }
+ bytesRead = bytes
+ return
+}
+
+type peb32 struct {
+ Reserved1 [2]byte
+ BeingDebugged byte
+ Reserved2 [1]byte
+ Reserved3 [2]uint32
+ Ldr uint32
+ ProcessParameters uint32
+ // more fields...
+}
+
+type unicodeString32 struct {
+ length uint16
+ maxLength uint16
+ buffer uint32
+}
+type procParams32 struct {
+ Reserved1 [16]byte
+ Reserved2 [5]uint32
+ CurrentDirectoryPath unicodeString32
+ CurrentDirectoryHandle uint32
+ DllPath unicodeString32
+ ImagePath unicodeString32
+ commandLine unicodeString32
+ env uint32
+}
+
+// ProcessCommandParams defines process command params
+type ProcessCommandParams struct {
+ CmdLine string
+ ImagePath string
+}
+
+func getCommandParamsForProcess32(h windows.Handle, includeImagePath bool) (*ProcessCommandParams, error) {
+ // get the pointer to the PEB
+ var procmem uintptr
+ size := unsafe.Sizeof(procmem)
+ err := NtQueryInformationProcess(h, ProcessWow64Information, uintptr(unsafe.Pointer(&procmem)), size)
+ if err != nil {
+ // this shouldn't happen because we already know we're asking about
+ // a 32 bit process.
+ return nil, err
+ }
+ var peb peb32
+ var read uint64
+ toRead := uint32(unsafe.Sizeof(peb))
+
+ read, err = ReadProcessMemory(h, procmem, uintptr(unsafe.Pointer(&peb)), toRead)
+ if err != nil {
+ return nil, err
+ }
+ if read != uint64(toRead) {
+ err = fmt.Errorf("Wrong amount of bytes read %v != %v", read, toRead)
+ return nil, err
+ }
+
+ // now go get the actual parameters
+ var pparams procParams32
+ pparamsSize := unsafe.Sizeof(pparams)
+
+ read, err = ReadProcessMemory(h, uintptr(peb.ProcessParameters), uintptr(unsafe.Pointer(&pparams)), uint32(pparamsSize))
+ if err != nil {
+ return nil, err
+ }
+ if read != uint64(pparamsSize) {
+ err = fmt.Errorf("Wrong amount of bytes read %v != %v", read, pparamsSize)
+ return nil, err
+ }
+
+ cmdline, err := readUnicodeString32(h, pparams.commandLine)
+ if err != nil {
+ return nil, err
+ }
+
+ var imagepath string
+ if includeImagePath {
+ imagepath, err = readUnicodeString32(h, pparams.ImagePath)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ procCommandParams := &ProcessCommandParams{
+ CmdLine: cmdline,
+ ImagePath: imagepath,
+ }
+
+ return procCommandParams, nil
+}
+
+func readUnicodeString32(h windows.Handle, u unicodeString32) (string, error) {
+ if u.length > u.maxLength {
+ return "", fmt.Errorf("Invalid unicodeString32, maxLength %v < length %v", u.maxLength, u.length)
+ }
+ // length does not include null terminator, if it exists
+	// allocate two extra bytes so we can add it ourselves
+ buf := make([]uint8, u.length+2)
+ read, err := ReadProcessMemory(h, uintptr(u.buffer), uintptr(unsafe.Pointer(&buf[0])), uint32(u.length))
+ if err != nil {
+ return "", err
+ }
+ if read != uint64(u.length) {
+ return "", fmt.Errorf("Wrong amount of bytes read (unicodeString32) %v != %v", read, u.length)
+ }
+ // null terminate string
+ buf = append(buf, 0, 0)
+ return ConvertWindowsString(buf), nil
+}
+
+// this definition taken from Winternl.h
+type unicodeString struct {
+ length uint16
+ maxLength uint16
+ buffer uintptr
+}
+
+type _rtlUserProcessParameters struct {
+ Reserved1 [16]byte
+ Reserved2 [10]uintptr
+ imagePathName unicodeString
+ commandLine unicodeString
+}
+type _peb struct {
+ Reserved1 [2]byte
+ BeingDebugged byte
+ Reserved2 [2]byte
+ Reserved3 [2]uintptr
+ Ldr uintptr // pointer to PEB_LDR_DATA
+ ProcessParameters uintptr // pointer to _rtlUserProcessParameters
+ // lots more stuff
+}
+
+// this definition taken from Winternl.h
+type processBasicInformationStruct struct {
+ Reserved1 uintptr
+ PebBaseAddress uintptr
+ Reserved2 [2]uintptr
+ UniqueProcessID uintptr
+ Reserved3 uintptr
+}
+
+func getCommandParamsForProcess64(h windows.Handle, includeImagePath bool) (*ProcessCommandParams, error) {
+ var pbi processBasicInformationStruct
+ pbisize := unsafe.Sizeof(pbi)
+ err := NtQueryInformationProcess(h, ProcessBasicInformation, uintptr(unsafe.Pointer(&pbi)), pbisize)
+ if err != nil {
+ return nil, err
+ }
+ // read the peb
+ var peb _peb
+ pebsize := unsafe.Sizeof(peb)
+ readsize, err := ReadProcessMemory(h, pbi.PebBaseAddress, uintptr(unsafe.Pointer(&peb)), uint32(pebsize))
+ if err != nil {
+ return nil, err
+ }
+ if readsize != uint64(pebsize) {
+ err = fmt.Errorf("Incorrect read size %v %v", readsize, pebsize)
+ return nil, err
+ }
+
+ // go get the parameters
+ var pparams _rtlUserProcessParameters
+ paramsize := unsafe.Sizeof(pparams)
+ readsize, err = ReadProcessMemory(h, peb.ProcessParameters, uintptr(unsafe.Pointer(&pparams)), uint32(paramsize))
+ if err != nil {
+ return nil, err
+ }
+ if readsize != uint64(paramsize) {
+ return nil, fmt.Errorf("Incorrect read size %v %v", readsize, paramsize)
+ }
+
+ cmdline, err := readUnicodeString(h, pparams.commandLine)
+ if err != nil {
+ return nil, err
+ }
+
+ var imagepath string
+ if includeImagePath {
+ imagepath, err = readUnicodeString(h, pparams.imagePathName)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ procCommandParams := &ProcessCommandParams{
+ CmdLine: cmdline,
+ ImagePath: imagepath,
+ }
+
+ return procCommandParams, nil
+}
+
+func readUnicodeString(h windows.Handle, u unicodeString) (string, error) {
+ if u.length > u.maxLength {
+ return "", fmt.Errorf("Invalid unicodeString, maxLength %v < length %v", u.maxLength, u.length)
+ }
+ // length does not include null terminator, if it exists
+	// allocate two extra bytes so we can add it ourselves
+ buf := make([]uint8, u.length+2)
+ read, err := ReadProcessMemory(h, uintptr(u.buffer), uintptr(unsafe.Pointer(&buf[0])), uint32(u.length))
+ if err != nil {
+ return "", err
+ }
+ if read != uint64(u.length) {
+ return "", fmt.Errorf("Wrong amount of bytes read (unicodeString) %v != %v", read, u.length)
+ }
+ // null terminate string
+ buf = append(buf, 0, 0)
+ return ConvertWindowsString(buf), nil
+}
+
+// GetCommandParamsForProcess returns the command line (and optionally image path) for the given process
+func GetCommandParamsForProcess(h windows.Handle, includeImagePath bool) (*ProcessCommandParams, error) {
+ // first need to check if this is a 32 bit process running on win64
+
+ // for now, assumes we are win64
+ is32bit, _ := IsWow64Process(h)
+ if is32bit {
+ return getCommandParamsForProcess32(h, includeImagePath)
+ }
+ return getCommandParamsForProcess64(h, includeImagePath)
+}
+
+// GetCommandParamsForPid returns the command line (and optionally image path) for the given PID
+func GetCommandParamsForPid(pid uint32, includeImagePath bool) (*ProcessCommandParams, error) {
+ h, err := windows.OpenProcess(0x1010, false, uint32(pid))
+ if err != nil {
+ err = fmt.Errorf("Failed to open process %v", err)
+ return nil, err
+ }
+ defer windows.CloseHandle(h)
+ return GetCommandParamsForProcess(h, includeImagePath)
+}
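+
+// exampleLogCommandLine is an illustrative sketch only; it is not part of the
+// upstream Datadog file. It shows how GetCommandParamsForPid might be used to
+// log the command line and image path of a hypothetical pid.
+func exampleLogCommandLine(pid uint32) {
+	params, err := GetCommandParamsForPid(pid, true)
+	if err != nil {
+		log.Debugf("could not read command params for pid %d: %v", pid, err)
+		return
+	}
+	log.Infof("pid %d cmdline %q image %q", pid, params.CmdLine, params.ImagePath)
+}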
+
+// GetImagePathForProcess returns executable path name in the win32 format
+func GetImagePathForProcess(h windows.Handle) (string, error) {
+ const maxPath = 260
+	// Note that this isn't entirely accurate in all cases; the max can actually be 32K
+ // (requires a registry setting change)
+ // https://docs.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=cmd
+ // In this particular case we are opting for MAX_PATH because 32k is a lot to allocate
+ // in most cases where this API will be used (process enumeration loop)
+ var buf [maxPath + 1]uint16
+ n := uint32(len(buf))
+ _, _, lastErr := procQueryFullProcessImageNameW.Call(
+ uintptr(h),
+ uintptr(0),
+ uintptr(unsafe.Pointer(&buf)),
+ uintptr(unsafe.Pointer(&n)))
+ if lastErr.(syscall.Errno) == 0 {
+ return syscall.UTF16ToString(buf[:n]), nil
+ }
+ return "", lastErr
+}
+
+const (
+ processQueryLimitedInformation = windows.PROCESS_QUERY_LIMITED_INFORMATION
+
+ stillActive = windows.STATUS_PENDING
+)
+
+// IsProcess checks to see if a given pid is currently valid in the process table
+func IsProcess(pid int) bool {
+ h, err := windows.OpenProcess(processQueryLimitedInformation, false, uint32(pid))
+ if err != nil {
+ return false
+ }
+ var c windows.NTStatus
+ err = windows.GetExitCodeProcess(h, (*uint32)(&c))
+ windows.Close(h)
+ if err == nil {
+ return c == stillActive
+ }
+ return false
+}
+
+func getProcessStartTimeAsNs(pid uint64) (uint64, error) {
+ h, err := windows.OpenProcess(processQueryLimitedInformation, false, uint32(pid))
+ if err != nil {
+ return 0, fmt.Errorf("Error opening process %v", err)
+ }
+ defer windows.Close(h)
+ var creation windows.Filetime
+ var exit windows.Filetime
+ var krn windows.Filetime
+ var user windows.Filetime
+ err = windows.GetProcessTimes(h, &creation, &exit, &krn, &user)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(creation.Nanoseconds()), nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/scmmonitor.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/scmmonitor.go
new file mode 100644
index 0000000000..6263d80ab6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/scmmonitor.go
@@ -0,0 +1,202 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+//go:build windows
+
+package winutil
+
+import (
+ "fmt"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+ "golang.org/x/sys/windows/svc/mgr"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// ServiceInfo contains name information for each service identified by PID
+type ServiceInfo struct {
+ ServiceName []string
+ DisplayName []string
+}
+
+// ServiceList is the return value from a query by pid.
+type ServiceList struct {
+ essp []*windows.ENUM_SERVICE_STATUS_PROCESS
+ startTime uint64
+}
+
+// SCMMonitor is an object that allows the caller to monitor Windows services.
+// The object will maintain a table of active services indexed by PID
+type SCMMonitor struct {
+ mux sync.Mutex
+ pidToService map[uint64]*ServiceList
+ nonServicePid map[uint64]uint64 // start time of this pid
+ lastMapTime uint64 // ns since jan1 1970
+ serviceRefreshes uint64
+}
+
+// GetServiceMonitor returns a service monitor object
+func GetServiceMonitor() *SCMMonitor {
+ return &SCMMonitor{
+ pidToService: make(map[uint64]*ServiceList),
+ nonServicePid: make(map[uint64]uint64),
+ }
+}
+
+type startTimeFunc func(uint64) (uint64, error)
+
+var pGetProcessStartTimeAsNs = startTimeFunc(getProcessStartTimeAsNs)
+
+// GetRefreshCount returns the number of times we've actually queried
+// the SCM database. Used for logging stats.
+func (scm *SCMMonitor) GetRefreshCount() uint64 {
+ return scm.serviceRefreshes
+}
+func (scm *SCMMonitor) refreshCache() error {
+
+ // EnumServiceStatusEx requires only SC_MANAGER_ENUM_SERVICE. Switch to
+ // new library to use least privilege
+ h, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_ENUMERATE_SERVICE)
+ if err != nil {
+ log.Warnf("Failed to connect to scm %v", err)
+ return fmt.Errorf("Failed to open SCM %v", err)
+ }
+ m := &mgr.Mgr{Handle: h}
+ defer m.Disconnect()
+
+ var bytesNeeded, servicesReturned uint32
+ var buf []byte
+ for {
+ var p *byte
+ if len(buf) > 0 {
+ p = &buf[0]
+ }
+ err = windows.EnumServicesStatusEx(m.Handle, windows.SC_ENUM_PROCESS_INFO,
+ windows.SERVICE_WIN32, windows.SERVICE_STATE_ALL,
+ p, uint32(len(buf)), &bytesNeeded, &servicesReturned, nil, nil)
+ if err == nil {
+ break
+ }
+ if err != windows.ERROR_MORE_DATA {
+ return fmt.Errorf("Failed to enum services %v", err)
+ }
+ if bytesNeeded <= uint32(len(buf)) {
+ return err
+ }
+ buf = make([]byte, bytesNeeded)
+ }
+ if servicesReturned == 0 {
+ return nil
+ }
+
+ services := unsafe.Slice((*windows.ENUM_SERVICE_STATUS_PROCESS)(unsafe.Pointer(&buf[0])), servicesReturned)
+
+ newmap := make(map[uint64]*ServiceList)
+ for idx, svc := range services {
+ var thissvc *ServiceList
+ var ok bool
+ if thissvc, ok = newmap[uint64(svc.ServiceStatusProcess.ProcessId)]; !ok {
+ thissvc = &ServiceList{}
+ newmap[uint64(svc.ServiceStatusProcess.ProcessId)] = thissvc
+ }
+ thissvc.essp = append(thissvc.essp, &services[idx])
+ }
+ var current windows.Filetime
+	windows.GetSystemTimeAsFileTime(&current)
+ scm.lastMapTime = uint64(current.Nanoseconds())
+ scm.pidToService = newmap
+ scm.serviceRefreshes++
+ return nil
+}
+
+func (s *ServiceList) toServiceInfo() *ServiceInfo {
+ var si ServiceInfo
+ for _, inf := range s.essp {
+ si.DisplayName = append(si.DisplayName, windows.UTF16PtrToString(inf.DisplayName))
+ si.ServiceName = append(si.ServiceName, windows.UTF16PtrToString(inf.ServiceName))
+ }
+ return &si
+
+}
+
+func (scm *SCMMonitor) getServiceFromCache(pid, pidstart uint64) (*ServiceInfo, error) {
+ var err error
+ if val, ok := scm.pidToService[pid]; ok {
+ // it was in there. see if it's the right one
+ if val.startTime == 0 {
+ // we don't prepopulate the cache with pid start times
+ val.startTime, err = pGetProcessStartTimeAsNs(pid)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if val.startTime == pidstart {
+ // it's the same one.
+ return val.toServiceInfo(), nil
+ }
+ }
+ return nil, nil
+}
+
+// GetServiceInfo gets the service name and display name if the process identified
+// by the pid is in the SCM. A process which is not an SCM controlled service will
+// return nil with no error
+func (scm *SCMMonitor) GetServiceInfo(pid uint64) (*ServiceInfo, error) {
+ // get the process start time of the pid being checked
+ pidstart, err := pGetProcessStartTimeAsNs(pid)
+ if err != nil {
+ return nil, err
+ }
+ scm.mux.Lock()
+ defer scm.mux.Unlock()
+ // check to see if the pid is in the cache of known, not service pids
+ if val, ok := scm.nonServicePid[pid]; ok {
+ // it's a known non service pid. Make sure it's not recycled.
+ if val == pidstart {
+ // it's the same process. We know this isn't a service
+ return nil, nil
+ }
+ // it was in there but the times didn't match, which means
+ // it's a different process. Clean it out.
+ delete(scm.nonServicePid, pid)
+ }
+ // if we get here it either wasn't in the map, or it was but the
+ // start time didn't match
+
+ if pidstart <= scm.lastMapTime {
+ // so it's been around longer than our last check of the service
+ // table. so if it's in there it's probably good.
+ si, err := scm.getServiceFromCache(pid, pidstart)
+ if err != nil {
+ return nil, err
+ }
+ if si != nil {
+ return si, nil
+ }
+ // else
+ scm.nonServicePid[pid] = pidstart
+ return nil, nil
+
+ }
+
+ // if we get here, the process
+ // is newer than the service map.
+ if err = scm.refreshCache(); err != nil {
+ return nil, err
+ }
+ // now check the service map
+ si, err := scm.getServiceFromCache(pid, pidstart)
+ if err != nil {
+ return nil, err
+ }
+ if si != nil {
+ return si, nil
+ }
+ // otherwise put this pid as a known, non service
+ scm.nonServicePid[pid] = pidstart
+ return nil, nil
+}
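+
+// exampleIsServicePid is an illustrative sketch only; it is not part of the
+// upstream Datadog file. The monitor is meant to be created once (via
+// GetServiceMonitor) and reused, so the SCM is only re-enumerated when a pid
+// newer than the cached service map is seen.
+func exampleIsServicePid(mon *SCMMonitor, pid uint64) (bool, error) {
+	info, err := mon.GetServiceInfo(pid)
+	if err != nil {
+		return false, err
+	}
+	// a nil ServiceInfo with no error means the pid is not an SCM-controlled service
+	return info != nil, nil
+}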
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/service.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/service.go
new file mode 100644
index 0000000000..08ee96007f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/service.go
@@ -0,0 +1,355 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+package winutil
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+ "golang.org/x/sys/windows/svc"
+ "golang.org/x/sys/windows/svc/mgr"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+const (
+ defaultServiceCommandTimeout = 10
+)
+
+var (
+ modadvapi32 = windows.NewLazyDLL("advapi32.dll")
+ procEnumDependentServices = modadvapi32.NewProc("EnumDependentServicesW")
+)
+
+type enumServiceState uint32
+
+// OpenSCManager connects to SCM
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-openscmanagerw
+func OpenSCManager(desiredAccess uint32) (*mgr.Mgr, error) {
+ h, err := windows.OpenSCManager(nil, nil, desiredAccess)
+ if err != nil {
+ return nil, err
+ }
+ return &mgr.Mgr{Handle: h}, nil
+}
+
+// OpenService opens a handle for serviceName
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-openservicew
+func OpenService(manager *mgr.Mgr, serviceName string, desiredAccess uint32) (*mgr.Service, error) {
+ h, err := windows.OpenService(manager.Handle, windows.StringToUTF16Ptr(serviceName), desiredAccess)
+ if err != nil {
+ return nil, err
+ }
+ return &mgr.Service{Name: serviceName, Handle: h}, nil
+}
+
+// StartService starts serviceName via SCM.
+//
+// Does not block until service is started
+// https://learn.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-startservicea#remarks
+func StartService(serviceName string, serviceArgs ...string) error {
+
+ manager, err := OpenSCManager(windows.SC_MANAGER_CONNECT)
+ if err != nil {
+ return fmt.Errorf("could not open SCM: %v", err)
+ }
+ defer manager.Disconnect()
+
+ service, err := OpenService(manager, serviceName, windows.SERVICE_START)
+ if err != nil {
+ return fmt.Errorf("could not open service %s: %v", serviceName, err)
+ }
+ defer service.Close()
+
+ err = service.Start(serviceArgs...)
+ if err != nil {
+ return fmt.Errorf("could not start service %s: %v", serviceName, err)
+ }
+ return nil
+}
+
+// ControlService sends a control code to a specified service and waits up to
+// timeout for the service to transition to the requested state
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-controlservice
+//
+//revive:disable-next-line:var-naming Name is intended to match the Windows API name
+func ControlService(serviceName string, command svc.Cmd, to svc.State, desiredAccess uint32, timeout int64) error {
+
+ manager, err := OpenSCManager(windows.SC_MANAGER_CONNECT)
+ if err != nil {
+ return fmt.Errorf("could not open SCM: %v", err)
+ }
+ defer manager.Disconnect()
+
+ service, err := OpenService(manager, serviceName, desiredAccess)
+ if err != nil {
+ return fmt.Errorf("could not open service %s: %v", serviceName, err)
+ }
+ defer service.Close()
+
+ status, err := service.Control(command)
+ if err != nil {
+ return fmt.Errorf("could not send control %d: %v", command, err)
+ }
+
+ timesup := time.Now().Add(time.Duration(timeout) * time.Second)
+ for status.State != to {
+ if time.Now().After(timesup) {
+ return fmt.Errorf("timeout waiting for service %s to go to state %d; current state: %d", serviceName, to, status.State)
+ }
+ time.Sleep(300 * time.Millisecond)
+ status, err = service.Query()
+ if err != nil {
+ return fmt.Errorf("could not retrieve status for %s: %v", serviceName, err)
+ }
+ }
+ return nil
+}
+
+func doStopService(serviceName string) error {
+ return ControlService(serviceName, svc.Stop, svc.Stopped, windows.SERVICE_STOP|windows.SERVICE_QUERY_STATUS, defaultServiceCommandTimeout)
+}
+
+// StopService stops a service and any services that depend on it
+func StopService(serviceName string) error {
+
+ deps, err := ListDependentServices(serviceName, windows.SERVICE_ACTIVE)
+ if err != nil {
+ return fmt.Errorf("could not list dependent services for %s: %v", serviceName, err)
+ }
+
+ for _, dep := range deps {
+ err = doStopService(dep.serviceName)
+ if err != nil {
+ return fmt.Errorf("could not stop service %s: %v", dep.serviceName, err)
+ }
+ }
+ return doStopService(serviceName)
+}
+
+// WaitForState waits for the service to reach the desired state. A timeout can be specified
+// with a context. Returns nil if/when the service reaches the desired state.
+func WaitForState(ctx context.Context, serviceName string, desiredState svc.State) error {
+ // open handle to service
+ manager, err := OpenSCManager(windows.SC_MANAGER_CONNECT)
+ if err != nil {
+ return fmt.Errorf("could not open SCM: %v", err)
+ }
+ defer manager.Disconnect()
+
+ service, err := OpenService(manager, serviceName, windows.SERVICE_QUERY_STATUS)
+ if err != nil {
+ return fmt.Errorf("could not open service %s: %v", serviceName, err)
+ }
+ defer service.Close()
+
+ // check if state matches desiredState
+ status, err := service.Query()
+ if err != nil {
+ return fmt.Errorf("could not retrieve service status: %v", err)
+ }
+ if status.State == desiredState {
+ return nil
+ }
+
+ // Wait for timeout or state to match desiredState
+ for {
+ select {
+ case <-time.After(300 * time.Millisecond):
+ status, err := service.Query()
+ if err != nil {
+ return fmt.Errorf("could not retrieve service status: %v", err)
+ }
+ if status.State == desiredState {
+ return nil
+ }
+ case <-ctx.Done():
+ status, err := service.Query()
+ if err != nil {
+ return fmt.Errorf("could not retrieve service status: %v", err)
+ }
+ if status.State == desiredState {
+ return nil
+ }
+ return ctx.Err()
+ }
+ }
+}
+
+// RestartService stops a service and then, if the stop was successful, starts it again
+func RestartService(serviceName string) error {
+ var err error
+ if err = StopService(serviceName); err == nil {
+ err = StartService(serviceName)
+ }
+ return err
+}
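+
+// exampleRestartAndWait is an illustrative sketch only; it is not part of the
+// upstream Datadog file. "datadogagent" is a hypothetical service name, used
+// to show RestartService paired with WaitForState under a context timeout.
+func exampleRestartAndWait() error {
+	if err := RestartService("datadogagent"); err != nil {
+		return err
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	return WaitForState(ctx, "datadogagent", svc.Running)
+}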
+
+// ListDependentServices returns the services that depend on serviceName
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-enumdependentservicesw
+//
+// when Go has their version, replace ours with the upstream
+// https://github.com/golang/go/issues/56766
+func ListDependentServices(serviceName string, state enumServiceState) ([]EnumServiceStatus, error) {
+ manager, err := OpenSCManager(windows.SC_MANAGER_CONNECT)
+ if err != nil {
+ return nil, err
+ }
+ defer manager.Disconnect()
+
+ service, err := OpenService(manager, serviceName, windows.SERVICE_ENUMERATE_DEPENDENTS)
+ if err != nil {
+ return nil, fmt.Errorf("could not open service %s: %v", serviceName, err)
+ }
+ defer service.Close()
+
+ deps, err := enumDependentServices(service.Handle, state)
+ if err != nil {
+ return nil, fmt.Errorf("could not enumerate dependent services for %s: %v", serviceName, err)
+ }
+ return deps, nil
+}
+
+// IsServiceDisabled returns true if serviceName is disabled
+func IsServiceDisabled(serviceName string) (enabled bool, err error) {
+ enabled = false
+
+ manager, err := OpenSCManager(windows.SC_MANAGER_CONNECT)
+ if err != nil {
+ return
+ }
+ defer manager.Disconnect()
+
+ service, err := OpenService(manager, serviceName, windows.SERVICE_QUERY_CONFIG)
+ if err != nil {
+ return enabled, fmt.Errorf("could not open service %s: %v", serviceName, err)
+ }
+ defer service.Close()
+
+ serviceConfig, err := service.Config()
+ if err != nil {
+ return enabled, fmt.Errorf("could not retrieve config for %s: %v", serviceName, err)
+ }
+ return (serviceConfig.StartType == windows.SERVICE_DISABLED), nil
+}
+
+// IsServiceRunning returns true if serviceName's state is SERVICE_RUNNING
+func IsServiceRunning(serviceName string) (running bool, err error) {
+ running = false
+
+ manager, err := OpenSCManager(windows.SC_MANAGER_CONNECT)
+ if err != nil {
+ return
+ }
+ defer manager.Disconnect()
+
+ service, err := OpenService(manager, serviceName, windows.SERVICE_QUERY_STATUS)
+ if err != nil {
+ return running, fmt.Errorf("could not open service %s: %v", serviceName, err)
+ }
+ defer service.Close()
+
+ serviceStatus, err := service.Query()
+ if err != nil {
+ return running, fmt.Errorf("could not retrieve status for %s: %v", serviceName, err)
+ }
+ return (serviceStatus.State == windows.SERVICE_RUNNING), nil
+}
+
+// ServiceStatus reports information pertaining to enumerated services
+// only exported so binary.Read works
+type ServiceStatus struct {
+ DwServiceType uint32
+ DwCurrentState uint32
+ DwControlsAccepted uint32
+ DwWin32ExitCode uint32
+ DwServiceSpecificExitCode uint32
+ DwCheckPoint uint32
+ DwWaitHint uint32
+}
+
+// EnumServiceStatus complete enumerated service information
+// only exported so binary.Read works
+type EnumServiceStatus struct {
+ serviceName string
+ displayName string
+ status ServiceStatus
+}
+
+type internalEnumServiceStatus struct {
+ ServiceName uint64 // offset from beginning of buffer
+ DisplayName uint64 // offset from beginning of buffer.
+ Status ServiceStatus
+ Padding uint32 // structure is qword aligned.
+
+}
+
+func enumDependentServices(h windows.Handle, state enumServiceState) (services []EnumServiceStatus, err error) {
+ services = make([]EnumServiceStatus, 0)
+ var bufsz uint32
+ var count uint32
+ _, _, err = procEnumDependentServices.Call(uintptr(h),
+ uintptr(state),
+ uintptr(0),
+ uintptr(0), // current buffer size is zero
+ uintptr(unsafe.Pointer(&bufsz)),
+ uintptr(unsafe.Pointer(&count)))
+
+ // success with a 0 buffer means no dependent services
+ if err == error(windows.ERROR_SUCCESS) {
+ err = nil
+ return
+ }
+
+ // since the initial buffer sent is 0 bytes, we expect the return code to
+ // always be ERROR_MORE_DATA, unless something went wrong
+ if err != error(windows.ERROR_MORE_DATA) {
+ log.Warnf("Error getting buffer %v", err)
+ return
+ }
+
+ servicearray := make([]uint8, bufsz)
+ ret, _, err := procEnumDependentServices.Call(uintptr(h),
+ uintptr(state),
+ uintptr(unsafe.Pointer(&servicearray[0])),
+ uintptr(bufsz),
+ uintptr(unsafe.Pointer(&bufsz)),
+ uintptr(unsafe.Pointer(&count)))
+ if ret == 0 {
+ log.Warnf("Error getting deps %d %v", int(ret), err)
+ return
+ }
+ // now get to parse out the C structure into go.
+ var ess internalEnumServiceStatus
+ baseptr := uintptr(unsafe.Pointer(&servicearray[0]))
+ buf := bytes.NewReader(servicearray)
+ for i := uint32(0); i < count; i++ {
+
+ err = binary.Read(buf, binary.LittleEndian, &ess)
+ if err != nil {
+ break
+ }
+
+ ess.ServiceName = ess.ServiceName - uint64(baseptr)
+ ess.DisplayName = ess.DisplayName - uint64(baseptr)
+ ss := EnumServiceStatus{serviceName: ConvertWindowsString(servicearray[ess.ServiceName:]),
+ displayName: ConvertWindowsString(servicearray[ess.DisplayName:]),
+ status: ess.Status}
+ services = append(services, ss)
+ }
+ return
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/shutil.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/shutil.go
new file mode 100644
index 0000000000..8a6ce97ff0
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/shutil.go
@@ -0,0 +1,82 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+//go:build windows
+
+package winutil
+
+import (
+ "path/filepath"
+
+ "golang.org/x/sys/windows"
+ "golang.org/x/sys/windows/registry"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+func getDefaultProgramDataDir() (path string, err error) {
+ res, err := windows.KnownFolderPath(windows.FOLDERID_ProgramData, 0)
+ if err == nil {
+ path = filepath.Join(res, "Datadog")
+ }
+ return
+}
+
+// GetProgramDataDir returns the current programdatadir, usually
+// c:\programdata\Datadog
+func GetProgramDataDir() (path string, err error) {
+ return GetProgramDataDirForProduct("Datadog Agent")
+}
+
+// GetProgramDataDirForProduct returns the current programdatadir, usually
+// c:\programdata\Datadog given a product key name
+func GetProgramDataDirForProduct(product string) (path string, err error) {
+ keyname := "SOFTWARE\\Datadog\\" + product
+ k, err := registry.OpenKey(registry.LOCAL_MACHINE,
+ keyname,
+ registry.ALL_ACCESS)
+ if err != nil {
+ // if the key isn't there, we might be running a standalone binary that wasn't installed through MSI
+ log.Debugf("Windows installation key root (%s) not found, using default program data dir", keyname)
+ return getDefaultProgramDataDir()
+ }
+ defer k.Close()
+ val, _, err := k.GetStringValue("ConfigRoot")
+ if err != nil {
+ log.Warnf("Windows installation key config not found, using default program data dir")
+ return getDefaultProgramDataDir()
+ }
+ path = val
+ return
+}
+
+// GetProgramFilesDirForProduct returns the root of the installation directory,
+// usually c:\program files\datadog\datadog agent
+func GetProgramFilesDirForProduct(product string) (path string, err error) {
+ keyname := "SOFTWARE\\Datadog\\" + product
+ k, err := registry.OpenKey(registry.LOCAL_MACHINE,
+ keyname,
+ registry.ALL_ACCESS)
+ if err != nil {
+ // if the key isn't there, we might be running a standalone binary that wasn't installed through MSI
+		log.Debugf("Windows installation key root (%s) not found, using default program files dir", keyname)
+ return getDefaultProgramFilesDir()
+ }
+ defer k.Close()
+ val, _, err := k.GetStringValue("InstallPath")
+ if err != nil {
+		log.Warnf("Windows installation key config not found, using default program files dir")
+ return getDefaultProgramFilesDir()
+ }
+ path = val
+ return
+}
+
+func getDefaultProgramFilesDir() (path string, err error) {
+ res, err := windows.KnownFolderPath(windows.FOLDERID_ProgramFiles, 0)
+ if err == nil {
+ path = filepath.Join(res, "Datadog", "Datadog Agent")
+ }
+ return
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/time.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/time.go
new file mode 100644
index 0000000000..f86a098f5b
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/time.go
@@ -0,0 +1,23 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023-present Datadog, Inc.
+
+//go:build windows
+
+package winutil
+
+// EpochDifferenceSecs is the difference between windows and unix epochs in 100ns intervals
+// From GetUnixTimestamp() datadog-windows-filter\ddfilter\http\http_callbacks.c
+// 11644473600s * 1000ms/s * 1000us/ms * 10 intervals/us
+const EpochDifferenceSecs uint64 = 116444736000000000
+
+// FileTimeToUnixNano translates Windows FileTime to nanoseconds since Unix epoch
+func FileTimeToUnixNano(ft uint64) uint64 {
+ return (ft - EpochDifferenceSecs) * 100
+}
+
+// FileTimeToUnix translates Windows FileTime to seconds since Unix epoch
+func FileTimeToUnix(ft uint64) uint64 {
+ return (ft - EpochDifferenceSecs) / 10000000
+}
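+
+// exampleFileTimeToUnix is an illustrative sketch only; it is not part of the
+// upstream Datadog file. It shows how a FILETIME split into high/low DWORDs
+// (for example the fields of windows.Filetime) might be packed into a uint64
+// before conversion.
+func exampleFileTimeToUnix(highDateTime, lowDateTime uint32) uint64 {
+	ft := uint64(highDateTime)<<32 | uint64(lowDateTime)
+	return FileTimeToUnix(ft)
+}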
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/users.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/users.go
new file mode 100644
index 0000000000..19fe0f4189
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/users.go
@@ -0,0 +1,64 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2018-present Datadog, Inc.
+//go:build windows
+
+package winutil
+
+import (
+ "fmt"
+ "syscall"
+
+ "golang.org/x/sys/windows"
+
+ "github.com/DataDog/datadog-agent/pkg/util/log"
+)
+
+// GetSidFromUser grabs and returns the windows SID for the current user or an error.
+// The *SID returned does not need to be freed by the caller.
+func GetSidFromUser() (*windows.SID, error) {
+ log.Infof("Getting sidstring from user")
+ tok, e := syscall.OpenCurrentProcessToken()
+ if e != nil {
+ log.Warnf("Couldn't get process token %v", e)
+ return nil, e
+ }
+ defer tok.Close()
+
+ user, e := tok.GetTokenUser()
+ if e != nil {
+ log.Warnf("Couldn't get token user %v", e)
+ return nil, e
+ }
+
+ sidString, e := user.User.Sid.String()
+ if e != nil {
+ log.Warnf("Couldn't get user sid string %v", e)
+ return nil, e
+ }
+
+ return windows.StringToSid(sidString)
+}
+
+// IsUserAnAdmin returns true if a user is a member of the Administrators group
+// TODO: Microsoft does not recommend using this function, instead CheckTokenMembership should be used.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/shlobj_core/nf-shlobj_core-isuseranadmin
+//
+//revive:disable-next-line:var-naming Name is intended to match the Windows API name
+func IsUserAnAdmin() (bool, error) {
+ shell32 := windows.NewLazySystemDLL("Shell32.dll")
+ defer windows.FreeLibrary(windows.Handle(shell32.Handle()))
+
+ isUserAnAdminProc := shell32.NewProc("IsUserAnAdmin")
+ ret, _, winError := isUserAnAdminProc.Call()
+
+ if winError != windows.NTE_OP_OK {
+ return false, fmt.Errorf("IsUserAnAdmin returns error code %d", winError)
+ }
+ if ret == 0 {
+ return false, nil
+ }
+ return true, nil
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winmem.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winmem.go
new file mode 100644
index 0000000000..718171f892
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winmem.go
@@ -0,0 +1,143 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+package winutil
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ modkernel32 = windows.NewLazyDLL("kernel32.dll")
+ modPsapi = windows.NewLazyDLL("psapi.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+ procGetPerformanceInfo = modPsapi.NewProc("GetPerformanceInfo")
+)
+
+// VirtualMemoryStat contains basic metrics for virtual memory
+type VirtualMemoryStat struct {
+ // Total amount of RAM on this system
+ Total uint64
+
+ // RAM available for programs to allocate
+ //
+ // This value is computed from the kernel specific values.
+ Available uint64
+
+ // RAM used by programs
+ //
+ // This value is computed from the kernel specific values.
+ Used uint64
+
+ // Percentage of RAM used by programs
+ //
+ // This value is computed from the kernel specific values.
+ UsedPercent float64
+}
+
+// PagefileStat contains basic metrics for the windows pagefile
+type PagefileStat struct {
+ // The current committed memory limit for the system or
+ // the current process, whichever is smaller, in bytes
+ Total uint64
+
+ // The maximum amount of memory the current process can commit, in bytes.
+ // This value is equal to or smaller than the system-wide available commit
+ // value.
+ Available uint64
+
+ // Used is Total - Available
+ Used uint64
+
+ // UsedPercent is used as a percentage of the total pagefile
+ UsedPercent float64
+}
+
+// SwapMemoryStat contains swap statistics
+type SwapMemoryStat struct {
+ Total uint64
+ Used uint64
+ Free uint64
+ UsedPercent float64
+}
+
+type memoryStatusEx struct {
+ cbSize uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64 // in bytes
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// VirtualMemory returns virtual memory metrics for the machine
+func VirtualMemory() (*VirtualMemoryStat, error) {
+ var memInfo memoryStatusEx
+ memInfo.cbSize = uint32(unsafe.Sizeof(memInfo))
+ mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo)))
+ if mem == 0 {
+ return nil, windows.GetLastError()
+ }
+
+ ret := &VirtualMemoryStat{
+ Total: memInfo.ullTotalPhys,
+ Available: memInfo.ullAvailPhys,
+ Used: memInfo.ullTotalPhys - memInfo.ullAvailPhys,
+ UsedPercent: float64(memInfo.dwMemoryLoad),
+ }
+
+ return ret, nil
+}
+
+// PagefileMemory returns paging (swap) file metrics
+func PagefileMemory() (*PagefileStat, error) {
+ var memInfo memoryStatusEx
+ memInfo.cbSize = uint32(unsafe.Sizeof(memInfo))
+ mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo)))
+ if mem == 0 {
+ return nil, windows.GetLastError()
+ }
+ total := memInfo.ullTotalPageFile
+ free := memInfo.ullAvailPageFile
+ used := total - free
+ percent := (float64(used) / float64(total)) * 100
+ ret := &PagefileStat{
+ Total: total,
+ Available: free,
+ Used: used,
+ UsedPercent: percent,
+ }
+
+ return ret, nil
+}
+
+// SwapMemory returns swapfile statistics
+func SwapMemory() (*SwapMemoryStat, error) {
+ var perfInfo performanceInformation
+ perfInfo.cb = uint32(unsafe.Sizeof(perfInfo))
+ mem, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb))
+ if mem == 0 {
+ return nil, windows.GetLastError()
+ }
+ tot := uint64(perfInfo.commitLimit * perfInfo.pageSize)
+ used := uint64(perfInfo.commitTotal * perfInfo.pageSize)
+ free := tot - used
+ ret := &SwapMemoryStat{
+ Total: tot,
+ Used: used,
+ Free: free,
+ UsedPercent: (float64(used) / float64(tot)) * 100,
+ }
+
+ return ret, nil
+}
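+
+// exampleMemoryHeadroom is an illustrative sketch only; it is not part of the
+// upstream Datadog file. It combines VirtualMemory and PagefileMemory to
+// report the remaining physical and commit headroom in bytes.
+func exampleMemoryHeadroom() (physFree, commitFree uint64, err error) {
+	vm, err := VirtualMemory()
+	if err != nil {
+		return 0, 0, err
+	}
+	pf, err := PagefileMemory()
+	if err != nil {
+		return 0, 0, err
+	}
+	return vm.Available, pf.Available, nil
+}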
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winmem_386.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winmem_386.go
new file mode 100644
index 0000000000..d3e64d0cb6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winmem_386.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+package winutil
+
+type performanceInformation struct {
+ cb uint32
+ commitTotal uint32
+ commitLimit uint32
+ commitPeak uint32
+ physicalTotal uint32
+ physicalAvailable uint32
+ systemCache uint32
+ kernelTotal uint32
+ kernelPaged uint32
+ kernelNonpaged uint32
+ pageSize uint32
+ handleCount uint32
+ processCount uint32
+ threadCount uint32
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winmem_amd64.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winmem_amd64.go
new file mode 100644
index 0000000000..84c5f934c1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winmem_amd64.go
@@ -0,0 +1,25 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+package winutil
+
+type performanceInformation struct {
+ cb uint32
+ commitTotal uint64
+ commitLimit uint64
+ commitPeak uint64
+ physicalTotal uint64
+ physicalAvailable uint64
+ systemCache uint64
+ kernelTotal uint64
+ kernelPaged uint64
+ kernelNonpaged uint64
+ pageSize uint64
+ handleCount uint32
+ processCount uint32
+ threadCount uint32
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winsec.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winsec.go
new file mode 100644
index 0000000000..e48762e7ef
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winsec.go
@@ -0,0 +1,135 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2018-present Datadog, Inc.
+
+//go:build windows
+
+package winutil
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ advapi32 = syscall.NewLazyDLL("advapi32.dll")
+
+ //revive:disable:var-naming Name is intended to match the Windows API name
+ procGetAclInformation = advapi32.NewProc("GetAclInformation")
+ procGetNamedSecurityInfo = advapi32.NewProc("GetNamedSecurityInfoW")
+ procGetAce = advapi32.NewProc("GetAce")
+ //revive:enable:var-naming
+)
+
+// ACL_SIZE_INFORMATION struct
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-acl_size_information
+//
+//revive:disable:var-naming Name is intended to match the Windows type name
+type ACL_SIZE_INFORMATION struct {
+ AceCount uint32
+ AclBytesInUse uint32
+ AclBytesFree uint32
+}
+
+// ACL struct
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-acl
+type ACL struct {
+ AclRevision uint8
+ Sbz1 uint8
+ AclSize uint16
+ AceCount uint16
+ Sbz2 uint16
+}
+
+// ACCESS_ALLOWED_ACE struct
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace
+type ACCESS_ALLOWED_ACE struct {
+ AceType uint8
+ AceFlags uint8
+ AceSize uint16
+ AccessMask uint32
+ SidStart uint32
+}
+
+//revive:enable:var-naming (types)
+
+//revive:disable:var-naming Name is intended to match the Windows const name
+
+// ACL_INFORMATION_CLASS enum
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-acl_information_class
+const (
+ AclRevisionInformation = 1
+ AclSizeInformation = 2
+)
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header
+const (
+ ACCESS_ALLOWED_ACE_TYPE = 0
+ ACCESS_DENIED_ACE_TYPE = 1
+)
+
+//revive:enable:var-naming (const)
+
+// GetAclInformation calls windows 'GetAclInformation' function to retrieve
+// information about an access control list (ACL).
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/securitybaseapi/nf-securitybaseapi-getaclinformation
+//
+//revive:disable-next-line:var-naming Name is intended to match the Windows API name
+func GetAclInformation(acl *ACL, info *ACL_SIZE_INFORMATION, class uint32) error {
+ length := unsafe.Sizeof(*info)
+ ret, _, _ := procGetAclInformation.Call(
+ uintptr(unsafe.Pointer(acl)),
+ uintptr(unsafe.Pointer(info)),
+ uintptr(length),
+ uintptr(class))
+
+ if int(ret) == 0 {
+ return windows.GetLastError()
+ }
+ return nil
+}
+
+// GetNamedSecurityInfo calls Windows 'GetNamedSecurityInfo' function to
+// retrieve a copy of the security descriptor for an object specified by name.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/aclapi/nf-aclapi-getnamedsecurityinfow
+//
+//revive:disable-next-line:var-naming Name is intended to match the Windows API name
+func GetNamedSecurityInfo(objectName string, objectType int32, secInfo uint32, owner, group **windows.SID, dacl, sacl **ACL, secDesc *windows.Handle) error {
+ ret, _, err := procGetNamedSecurityInfo.Call(
+ uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(objectName))),
+ uintptr(objectType),
+ uintptr(secInfo),
+ uintptr(unsafe.Pointer(owner)),
+ uintptr(unsafe.Pointer(group)),
+ uintptr(unsafe.Pointer(dacl)),
+ uintptr(unsafe.Pointer(sacl)),
+ uintptr(unsafe.Pointer(secDesc)),
+ )
+ if ret != 0 {
+ return err
+ }
+ return nil
+}
+
+// GetAce calls Windows 'GetAce' function to obtain a pointer to an access
+// control entry (ACE) in an access control list (ACL).
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/securitybaseapi/nf-securitybaseapi-getace
+//
+//revive:disable-next-line:var-naming Name is intended to match the Windows API name
+func GetAce(acl *ACL, index uint32, ace **ACCESS_ALLOWED_ACE) error {
+ ret, _, _ := procGetAce.Call(uintptr(unsafe.Pointer(acl)), uintptr(index), uintptr(unsafe.Pointer(ace)))
+	if int(ret) == 0 {
+		return windows.GetLastError()
+	}
+ return nil
+}
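+
+// exampleCountAllowedAces is an illustrative sketch only; it is not part of
+// the upstream Datadog file. It fetches the DACL of a file path and counts
+// ACCESS_ALLOWED_ACE entries by combining the three wrappers above; the
+// windows.SE_FILE_OBJECT / windows.DACL_SECURITY_INFORMATION constants and
+// windows.LocalFree come from golang.org/x/sys/windows.
+func exampleCountAllowedAces(path string) (int, error) {
+	var dacl *ACL
+	var secDesc windows.Handle
+	err := GetNamedSecurityInfo(path, int32(windows.SE_FILE_OBJECT),
+		uint32(windows.DACL_SECURITY_INFORMATION), nil, nil, &dacl, nil, &secDesc)
+	if err != nil {
+		return 0, err
+	}
+	// the security descriptor buffer is allocated by the API and must be freed
+	defer windows.LocalFree(secDesc)
+
+	var sizeInfo ACL_SIZE_INFORMATION
+	if err := GetAclInformation(dacl, &sizeInfo, AclSizeInformation); err != nil {
+		return 0, err
+	}
+	allowed := 0
+	for i := uint32(0); i < sizeInfo.AceCount; i++ {
+		var ace *ACCESS_ALLOWED_ACE
+		if err := GetAce(dacl, i, &ace); err != nil {
+			return 0, err
+		}
+		if ace.AceType == ACCESS_ALLOWED_ACE_TYPE {
+			allowed++
+		}
+	}
+	return allowed, nil
+}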
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winstrings.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winstrings.go
new file mode 100644
index 0000000000..e0d49881db
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winstrings.go
@@ -0,0 +1,96 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+// Package winutil provides windows utilities
+package winutil
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+// ConvertWindowsStringList converts a windows-style C list of strings
+// (single-null-terminated elements, with a double null marking the end of
+// the list) to a slice of Go strings
+func ConvertWindowsStringList(winput []uint16) []string {
+
+ if len(winput) < 2 {
+ return nil
+ }
+ val := make([]string, 0, 5)
+ from := 0
+
+ for i := 0; i < (len(winput) - 1); i++ {
+ if winput[i] == 0 {
+ val = append(val, windows.UTF16ToString(winput[from:i]))
+ from = i + 1
+
+ if winput[i+1] == 0 {
+ return val
+ }
+ }
+ }
+ return val
+
+}
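+
+// exampleMultiSz is an illustrative sketch only; it is not part of the
+// upstream Datadog file. It builds a REG_MULTI_SZ-style buffer ("a", "bc",
+// terminated by a double null) and splits it with ConvertWindowsStringList.
+func exampleMultiSz() []string {
+	buf := []uint16{'a', 0, 'b', 'c', 0, 0}
+	return ConvertWindowsStringList(buf) // -> ["a", "bc"]
+}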
+
+// ConvertWindowsString converts a windows c-string
+// into a go string. Even though the input is array
+// of uint8, the underlying data is expected to be
+// uint16 (unicode)
+func ConvertWindowsString(winput []uint8) string {
+
+ p := (*[1 << 29]uint16)(unsafe.Pointer(&winput[0]))[: len(winput)/2 : len(winput)/2]
+ return windows.UTF16ToString(p)
+
+}
+
+// ConvertWindowsString16 converts a windows c-string
+// into a go string. Even though the input is array
+// of uint8, the underlying data is expected to be
+// uint16 (unicode)
+func ConvertWindowsString16(winput []uint16) string {
+ return windows.UTF16ToString(winput)
+}
+
+// ExpandEnvironmentStrings returns a string with any environment variables
+// substituted.
+//
+// provided here because `x/sys/windows` provides a wrapper to the underlying
+// function, but it expects C strings. This will do the buffer calculation
+// and return the go string everyone wants.
+func ExpandEnvironmentStrings(input string) (string, error) {
+
+ asutf16 := windows.StringToUTF16Ptr(input)
+
+ sz, err := windows.ExpandEnvironmentStrings(asutf16, nil, 0)
+ if err != nil {
+ return "", err
+ }
+ sz += 2 // leave room for terminating null, and a bonus char
+ target := make([]uint16, sz)
+
+ _, err = windows.ExpandEnvironmentStrings(asutf16, (*uint16)(unsafe.Pointer(&target[0])), sz)
+ if err != nil {
+ return "", err
+ }
+ return windows.UTF16ToString(target), nil
+}
+
+// UTF16PtrOrNilFromString converts a go string into a *uint16
+// using windows.Utf16PtrFromString, but will return nil for empty strings.
+//
+// Useful for Windows APIs that take NULL or a non-zero length string.
+// Be careful to check that the Windows API does not have special behavior
+// for a zero-length string.
+func UTF16PtrOrNilFromString(s string) (*uint16, error) {
+ if s == "" {
+ return nil, nil
+ }
+ return windows.UTF16PtrFromString(s)
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winver.go b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winver.go
new file mode 100644
index 0000000000..958c2a7364
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/util/winutil/winver.go
@@ -0,0 +1,130 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+//go:build windows
+
+// Package winutil contains Windows OS utilities
+package winutil
+
+import (
+ "fmt"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ k32 = windows.NewLazyDLL("kernel32.dll")
+ versiondll = windows.NewLazyDLL("version.dll")
+
+ procGetModuleHandle = k32.NewProc("GetModuleHandleW")
+ procGetModuleFileName = k32.NewProc("GetModuleFileNameW")
+ procGetFileVersionInfoSizeEx = versiondll.NewProc("GetFileVersionInfoSizeExW")
+ procGetFileVersionInfoEx = versiondll.NewProc("GetFileVersionInfoExW")
+ procVerQueryValue = versiondll.NewProc("VerQueryValueW")
+)
+
+// GetWindowsBuildString retrieves the windows build version by querying
+// the resource string as directed here https://msdn.microsoft.com/en-us/library/windows/desktop/ms724429(v=vs.85).aspx
+// as of Windows 8.1, the core GetVersion() APIs have been changed to
+// return the version of Windows manifested with the application, not
+// the version of the OS that is actually running
+func GetWindowsBuildString() (verstring string, err error) {
+ h, err := getModuleHandle("kernel32.dll")
+ if err != nil {
+ return
+ }
+ fullpath, err := getModuleFileName(h)
+ if err != nil {
+ return
+ }
+ data, err := getFileVersionInfo(fullpath)
+ if err != nil {
+ return
+ }
+ return getVersionInfo(data)
+}
+
+func getModuleHandle(fname string) (handle uintptr, err error) {
+ file := windows.StringToUTF16Ptr(fname)
+ handle, _, err = procGetModuleHandle.Call(uintptr(unsafe.Pointer(file)))
+ if handle == 0 {
+ return handle, err
+ }
+ return handle, nil
+}
+
+func getModuleFileName(h uintptr) (fname string, err error) {
+ fname = ""
+ err = nil
+ var sizeIncr = uint32(1024)
+ var size = sizeIncr
+ for {
+ buf := make([]uint16, size)
+ ret, _, err := procGetModuleFileName.Call(h, uintptr(unsafe.Pointer(&buf[0])), uintptr(size))
+ if ret == uintptr(size) || err == windows.ERROR_INSUFFICIENT_BUFFER {
+ size += sizeIncr
+ continue
+ } else if err != nil {
+ fname = windows.UTF16ToString(buf)
+ }
+ break
+ }
+ return
+
+}
+
+func getFileVersionInfo(filename string) (block []uint8, err error) {
+ fname := windows.StringToUTF16Ptr(filename)
+ ret, _, err := procGetFileVersionInfoSizeEx.Call(uintptr(0x02),
+ uintptr(unsafe.Pointer(fname)), uintptr(0))
+ if ret == 0 {
+ return
+ }
+ size := uint32(ret)
+ block = make([]uint8, size)
+ ret, _, err = procGetFileVersionInfoEx.Call(uintptr(0x02),
+ uintptr(unsafe.Pointer(fname)), uintptr(0), uintptr(size), uintptr(unsafe.Pointer(&block[0])))
+ if ret == 0 {
+ return nil, err
+ }
+ return block, nil
+
+}
+
+type tagVSFIXEDFILEINFO struct {
+ dwSignature uint32
+ dwStrucVersion uint32
+ dwFileVersionMS uint32
+ dwFileVersionLS uint32
+ dwProductVersionMS uint32
+ dwProductVersionLS uint32
+ dwFileFlagsMask uint32
+ dwFileFlags uint32
+ dwFileOS uint32
+ dwFileType uint32
+ dwFileSubtype uint32
+ dwFileDateMS uint32
+ dwFileDateLS uint32
+}
+
+func getVersionInfo(block []uint8) (ver string, err error) {
+
+ subblock := windows.StringToUTF16Ptr("\\")
+ var infoptr unsafe.Pointer
+ var ulen uint32
+ ret, _, err := procVerQueryValue.Call(uintptr(unsafe.Pointer(&block[0])),
+ uintptr(unsafe.Pointer(subblock)),
+ uintptr(unsafe.Pointer(&infoptr)),
+ uintptr(unsafe.Pointer(&ulen)))
+ if ret == 0 {
+ return
+ }
+ ffi := (*tagVSFIXEDFILEINFO)(infoptr)
+ ver = fmt.Sprintf("%d.%d Build %d", ffi.dwProductVersionMS>>16, ffi.dwProductVersionMS&0xFFFF, ffi.dwProductVersionLS>>16) // major/minor are the high/low words of ProductVersionMS; build is the high word of ProductVersionLS
+
+ return ver, nil
+
+}
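Aside (not part of the patch): the file above resolves the real OS build by loading kernel32.dll's path, reading its version resource with GetFileVersionInfoSizeExW/GetFileVersionInfoExW, and querying the root block "\" with VerQueryValueW, precisely because GetVersion() no longer reports the true OS version on manifested processes. A minimal sketch of a caller, assuming the vendored import path from this diff:

```go
//go:build windows

// Illustrative only; prints the OS build resolved from kernel32.dll's
// VS_FIXEDFILEINFO via the vendored winutil package.
package main

import (
	"fmt"
	"log"

	"github.com/DataDog/datadog-agent/pkg/util/winutil"
)

func main() {
	build, err := winutil.GetWindowsBuildString()
	if err != nil {
		log.Fatalf("querying kernel32.dll version resource: %v", err)
	}
	// Prints e.g. "10.0 Build 19045" (product version, not the manifested version).
	fmt.Println(build)
}
```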
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/version/LICENSE b/vendor/github.com/DataDog/datadog-agent/pkg/version/LICENSE
new file mode 100644
index 0000000000..b370545be1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/version/LICENSE
@@ -0,0 +1,200 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-present Datadog, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/version/base.go b/vendor/github.com/DataDog/datadog-agent/pkg/version/base.go
new file mode 100644
index 0000000000..e2d81c9484
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/version/base.go
@@ -0,0 +1,27 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package version defines the version of the agent
+package version
+
+// AgentVersion contains the version of the Agent.
+// It is populated at build time using build flags, see get_version_ldflags in tasks/utils.py
+var AgentVersion string
+
+// AgentPackageVersion contains the version of the datadog-agent package when installed by the updater.
+// It has more info than AgentVersion and
+// it is populated at build time using build flags, see get_version_ldflags in tasks/utils.py
+var AgentPackageVersion string
+
+// Commit is populated with the short commit hash from which the Agent was built
+var Commit string
+
+var agentVersionDefault = "6.0.0"
+
+func init() {
+ if AgentVersion == "" {
+ AgentVersion = agentVersionDefault
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/version/version.go b/vendor/github.com/DataDog/datadog-agent/pkg/version/version.go
new file mode 100644
index 0000000000..58a233fd62
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-agent/pkg/version/version.go
@@ -0,0 +1,95 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+package version
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Version holds SemVer infos for the agent and friends
+type Version struct {
+ Major int64
+ Minor int64
+ Patch int64
+ Pre string
+ Meta string
+ Commit string
+}
+
+var versionRx = regexp.MustCompile(`(\d+\.\d+\.\d+)(\-[^\+]+)*(\+.+)*`)
+
+// Agent returns the Datadog Agent version.
+func Agent() (Version, error) {
+ return New(AgentVersion, Commit)
+}
+
+// New parses a version string like `0.0.0` and a commit identifier and returns a Version instance
+func New(version, commit string) (Version, error) {
+ toks := versionRx.FindStringSubmatch(version)
+ if len(toks) == 0 || toks[0] != version {
+ // if regex didn't match or partially matched, raise an error
+ return Version{}, fmt.Errorf("Version string has wrong format")
+ }
+
+ // split version info (group 1 in regexp)
+ parts := strings.Split(toks[1], ".")
+ major, _ := strconv.ParseInt(parts[0], 10, 64)
+ minor, _ := strconv.ParseInt(parts[1], 10, 64)
+ patch, _ := strconv.ParseInt(parts[2], 10, 64)
+
+ // save Pre infos after removing leading `-`
+ pre := strings.Replace(toks[2], "-", "", 1)
+
+ // save Meta infos after removing leading `+`
+ meta := strings.Replace(toks[3], "+", "", 1)
+
+ av := Version{
+ Major: major,
+ Minor: minor,
+ Patch: patch,
+ Pre: pre,
+ Meta: meta,
+ Commit: commit,
+ }
+
+ return av, nil
+}
+
+func (v *Version) String() string {
+ ver := v.GetNumber()
+ if v.Pre != "" {
+ ver = fmt.Sprintf("%s-%s", ver, v.Pre)
+ }
+ if v.Meta != "" {
+ ver = fmt.Sprintf("%s+%s", ver, v.Meta)
+ }
+ if v.Commit != "" {
+ if v.Meta != "" {
+ ver = fmt.Sprintf("%s.commit.%s", ver, v.Commit)
+ } else {
+ ver = fmt.Sprintf("%s+commit.%s", ver, v.Commit)
+ }
+ }
+
+ return ver
+}
+
+// GetNumber returns a string containing version numbers only, e.g. `0.0.0`
+func (v *Version) GetNumber() string {
+ return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
+}
+
+// GetNumberAndPre returns a string containing version number and the pre only, e.g. `0.0.0-beta.1`
+func (v *Version) GetNumberAndPre() string {
+ version := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
+ if v.Pre != "" {
+ version = fmt.Sprintf("%s-%s", version, v.Pre)
+ }
+ return version
+}
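Aside (not part of the patch): base.go and version.go give the build a parsed view of the Agent version. AgentVersion, AgentPackageVersion, and Commit are plain package variables injected at link time (go build's -ldflags "-X .../pkg/version.AgentVersion=..." mechanism, assembled by the referenced get_version_ldflags helper), falling back to 6.0.0, and New/Agent split the resulting string into SemVer fields. A small sketch of the round trip, with illustrative version values:

```go
// Illustrative only; demonstrates the vendored version package added above.
package main

import (
	"fmt"
	"log"

	"github.com/DataDog/datadog-agent/pkg/version"
)

func main() {
	// Parse an explicit version string plus a commit hash (placeholder values).
	v, err := version.New("7.54.0-rc.1+git", "abc1234")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.GetNumber())       // 7.54.0
	fmt.Println(v.GetNumberAndPre()) // 7.54.0-rc.1
	fmt.Println(v.String())          // 7.54.0-rc.1+git.commit.abc1234

	// Agent() parses the link-time AgentVersion/Commit values instead.
	av, err := version.Agent()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(av.GetNumber()) // 6.0.0 unless overridden at build time
}
```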
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/.apigentools-info b/vendor/github.com/DataDog/datadog-api-client-go/v2/.apigentools-info
index 803b421807..46fb44a75d 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/.apigentools-info
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/.apigentools-info
@@ -4,13 +4,13 @@
"spec_versions": {
"v1": {
"apigentools_version": "1.6.6",
- "regenerated": "2024-03-13 18:08:58.635802",
- "spec_repo_commit": "29884c34"
+ "regenerated": "2024-05-20 18:10:24.268068",
+ "spec_repo_commit": "6340fda5"
},
"v2": {
"apigentools_version": "1.6.6",
- "regenerated": "2024-03-13 18:08:58.653539",
- "spec_repo_commit": "29884c34"
+ "regenerated": "2024-05-20 18:10:24.286542",
+ "spec_repo_commit": "6340fda5"
}
}
}
\ No newline at end of file
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/CHANGELOG.md b/vendor/github.com/DataDog/datadog-api-client-go/v2/CHANGELOG.md
index e437902b16..6720c0d543 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/CHANGELOG.md
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/CHANGELOG.md
@@ -1,5 +1,46 @@
# CHANGELOG
+## 2.26.0 / 2024-05-21
+
+### Fixed
+* fix case search documentation by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2469
+### Added
+* Add support variablesFromScript in Synthetics API test by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2471
+* Add JSONSchema assertion support to API and multistep tests by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2448
+* add 1 day logs to usage api docs by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2477
+* Update UserTeamIncluded to include teams by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2482
+* Security Monitoring - Make Default Tags available in the response by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2491
+* Add flex logs storage tier by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2493
+### [**Breaking**] Changed
+* Rename the Cloud Workload Security tag to CSM Threats by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2481
+
+
+**Full Changelog**: https://github.com/DataDog/datadog-api-client-go/compare/v2.25.0...v2.26.0
+
+## 2.25.0 / 2024-04-11
+
+### Fixed
+* Update Cleanup script to use GCP STS endpoint by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2423
+* Add include data to get team memberships response by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2407
+### Added
+* Add `ci-pipeline-fingerprints` field in v2.2 by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2432
+* Add validation endpoint for Security Monitoring Rules by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2453
+* Add UA documentation for online_archive and incident_management by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2457
+* Mark `unit` as nullable by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2459
+* Add query_interval_seconds to time-slice SLO condition parameters by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2461
+* Support providing files for the file upload feature when creating a Synthetic API test by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2460
+* Adding SLO Reporting API Documentation by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2426
+* Security Monitoring Suppression - Add data_exclusion_query field by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2465
+* aws api adding extended and deprecating old resource collection field by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2463
+### Changed
+* Add Team relationship to AuthNMappings by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2382
+### Deprecated
+* Remove deprecated /api/v1/usage/attribution by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2443
+* Deprecate legacy hourly usage metering endpoints by @api-clients-generation-pipeline in https://github.com/DataDog/datadog-api-client-go/pull/2439
+
+
+**Full Changelog**: https://github.com/DataDog/datadog-api-client-go/compare/v2.24.0...v2.25.0
+
## 2.24.0 / 2024-03-13
### Fixed
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadog/configuration.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadog/configuration.go
index 0dbecbbd2c..15cf3ace9f 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadog/configuration.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadog/configuration.go
@@ -364,6 +364,9 @@ func NewConfiguration() *Configuration {
"v2.GetIncidentService": false,
"v2.ListIncidentServices": false,
"v2.UpdateIncidentService": false,
+ "v2.CreateSLOReportJob": false,
+ "v2.GetSLOReport": false,
+ "v2.GetSLOReportJobStatus": false,
"v2.CreateIncidentTeam": false,
"v2.DeleteIncidentTeam": false,
"v2.GetIncidentTeam": false,
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/api_usage_metering.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/api_usage_metering.go
index aca915f3fe..abccaecfb9 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/api_usage_metering.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/api_usage_metering.go
@@ -315,7 +315,9 @@ func (r *GetIncidentManagementOptionalParameters) WithEndHr(endHr time.Time) *Ge
// GetIncidentManagement Get hourly usage for incident management.
// Get hourly usage for incident management.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetIncidentManagement(ctx _context.Context, startHr time.Time, o ...GetIncidentManagementOptionalParameters) (UsageIncidentManagementResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -415,7 +417,9 @@ func (r *GetIngestedSpansOptionalParameters) WithEndHr(endHr time.Time) *GetInge
// GetIngestedSpans Get hourly usage for ingested spans.
// Get hourly usage for ingested spans.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetIngestedSpans(ctx _context.Context, startHr time.Time, o ...GetIngestedSpansOptionalParameters) (UsageIngestedSpansResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -963,7 +967,9 @@ func (r *GetUsageAnalyzedLogsOptionalParameters) WithEndHr(endHr time.Time) *Get
// GetUsageAnalyzedLogs Get hourly usage for analyzed logs.
// Get hourly usage for analyzed logs (Security Monitoring).
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageAnalyzedLogs(ctx _context.Context, startHr time.Time, o ...GetUsageAnalyzedLogsOptionalParameters) (UsageAnalyzedLogsResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -1044,160 +1050,6 @@ func (a *UsageMeteringApi) GetUsageAnalyzedLogs(ctx _context.Context, startHr ti
return localVarReturnValue, localVarHTTPResponse, nil
}
-// GetUsageAttributionOptionalParameters holds optional parameters for GetUsageAttribution.
-type GetUsageAttributionOptionalParameters struct {
- EndMonth *time.Time
- SortDirection *UsageSortDirection
- SortName *UsageAttributionSort
- IncludeDescendants *bool
- Offset *int64
- Limit *int64
-}
-
-// NewGetUsageAttributionOptionalParameters creates an empty struct for parameters.
-func NewGetUsageAttributionOptionalParameters() *GetUsageAttributionOptionalParameters {
- this := GetUsageAttributionOptionalParameters{}
- return &this
-}
-
-// WithEndMonth sets the corresponding parameter name and returns the struct.
-func (r *GetUsageAttributionOptionalParameters) WithEndMonth(endMonth time.Time) *GetUsageAttributionOptionalParameters {
- r.EndMonth = &endMonth
- return r
-}
-
-// WithSortDirection sets the corresponding parameter name and returns the struct.
-func (r *GetUsageAttributionOptionalParameters) WithSortDirection(sortDirection UsageSortDirection) *GetUsageAttributionOptionalParameters {
- r.SortDirection = &sortDirection
- return r
-}
-
-// WithSortName sets the corresponding parameter name and returns the struct.
-func (r *GetUsageAttributionOptionalParameters) WithSortName(sortName UsageAttributionSort) *GetUsageAttributionOptionalParameters {
- r.SortName = &sortName
- return r
-}
-
-// WithIncludeDescendants sets the corresponding parameter name and returns the struct.
-func (r *GetUsageAttributionOptionalParameters) WithIncludeDescendants(includeDescendants bool) *GetUsageAttributionOptionalParameters {
- r.IncludeDescendants = &includeDescendants
- return r
-}
-
-// WithOffset sets the corresponding parameter name and returns the struct.
-func (r *GetUsageAttributionOptionalParameters) WithOffset(offset int64) *GetUsageAttributionOptionalParameters {
- r.Offset = &offset
- return r
-}
-
-// WithLimit sets the corresponding parameter name and returns the struct.
-func (r *GetUsageAttributionOptionalParameters) WithLimit(limit int64) *GetUsageAttributionOptionalParameters {
- r.Limit = &limit
- return r
-}
-
-// GetUsageAttribution Get usage attribution.
-// Get usage attribution.
-// **Note:** This endpoint will be fully deprecated on December 1, 2022.
-// Refer to [Migrating from v1 to v2 of the Usage Attribution API](https://docs.datadoghq.com/account_management/guide/usage-attribution-migration/) for the associated migration guide.
-//
-// Deprecated: This API is deprecated.
-func (a *UsageMeteringApi) GetUsageAttribution(ctx _context.Context, startMonth time.Time, fields UsageAttributionSupportedMetrics, o ...GetUsageAttributionOptionalParameters) (UsageAttributionResponse, *_nethttp.Response, error) {
- var (
- localVarHTTPMethod = _nethttp.MethodGet
- localVarPostBody interface{}
- localVarReturnValue UsageAttributionResponse
- optionalParams GetUsageAttributionOptionalParameters
- )
-
- if len(o) > 1 {
- return localVarReturnValue, nil, datadog.ReportError("only one argument of type GetUsageAttributionOptionalParameters is allowed")
- }
- if len(o) == 1 {
- optionalParams = o[0]
- }
-
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v1.UsageMeteringApi.GetUsageAttribution")
- if err != nil {
- return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
- }
-
- localVarPath := localBasePath + "/api/v1/usage/attribution"
-
- localVarHeaderParams := make(map[string]string)
- localVarQueryParams := _neturl.Values{}
- localVarFormParams := _neturl.Values{}
- localVarQueryParams.Add("start_month", datadog.ParameterToString(startMonth, ""))
- localVarQueryParams.Add("fields", datadog.ParameterToString(fields, ""))
- if optionalParams.EndMonth != nil {
- localVarQueryParams.Add("end_month", datadog.ParameterToString(*optionalParams.EndMonth, ""))
- }
- if optionalParams.SortDirection != nil {
- localVarQueryParams.Add("sort_direction", datadog.ParameterToString(*optionalParams.SortDirection, ""))
- }
- if optionalParams.SortName != nil {
- localVarQueryParams.Add("sort_name", datadog.ParameterToString(*optionalParams.SortName, ""))
- }
- if optionalParams.IncludeDescendants != nil {
- localVarQueryParams.Add("include_descendants", datadog.ParameterToString(*optionalParams.IncludeDescendants, ""))
- }
- if optionalParams.Offset != nil {
- localVarQueryParams.Add("offset", datadog.ParameterToString(*optionalParams.Offset, ""))
- }
- if optionalParams.Limit != nil {
- localVarQueryParams.Add("limit", datadog.ParameterToString(*optionalParams.Limit, ""))
- }
- localVarHeaderParams["Accept"] = "application/json;datetime-format=rfc3339"
-
- datadog.SetAuthKeys(
- ctx,
- &localVarHeaderParams,
- [2]string{"apiKeyAuth", "DD-API-KEY"},
- [2]string{"appKeyAuth", "DD-APPLICATION-KEY"},
- )
- req, err := a.Client.PrepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, nil)
- if err != nil {
- return localVarReturnValue, nil, err
- }
-
- localVarHTTPResponse, err := a.Client.CallAPI(req)
- if err != nil || localVarHTTPResponse == nil {
- return localVarReturnValue, localVarHTTPResponse, err
- }
-
- localVarBody, err := datadog.ReadBody(localVarHTTPResponse)
- if err != nil {
- return localVarReturnValue, localVarHTTPResponse, err
- }
-
- if localVarHTTPResponse.StatusCode >= 300 {
- newErr := datadog.GenericOpenAPIError{
- ErrorBody: localVarBody,
- ErrorMessage: localVarHTTPResponse.Status,
- }
- if localVarHTTPResponse.StatusCode == 403 || localVarHTTPResponse.StatusCode == 429 {
- var v APIErrorResponse
- err = a.Client.Decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
- if err != nil {
- return localVarReturnValue, localVarHTTPResponse, newErr
- }
- newErr.ErrorModel = v
- }
- return localVarReturnValue, localVarHTTPResponse, newErr
- }
-
- err = a.Client.Decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
- if err != nil {
- newErr := datadog.GenericOpenAPIError{
- ErrorBody: localVarBody,
- ErrorMessage: err.Error(),
- }
- return localVarReturnValue, localVarHTTPResponse, newErr
- }
-
- return localVarReturnValue, localVarHTTPResponse, nil
-}
-
// GetUsageAuditLogsOptionalParameters holds optional parameters for GetUsageAuditLogs.
type GetUsageAuditLogsOptionalParameters struct {
EndHr *time.Time
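Aside (not part of the patch): the removed GetUsageAttribution method and the deprecation notes stamped onto the per-product v1 endpoints in this file all point at the same replacement, the v2 hourly-usage-by-product-family endpoint. A hedged migration sketch follows; the v2 method name and parameters are assumed from the linked migration guide, and the product-family string is illustrative:

```go
// Illustrative only; queries the v2 hourly usage endpoint that the deprecated
// v1 per-product endpoints defer to. Credentials come from DD_API_KEY/DD_APP_KEY.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"time"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	ctx := datadog.NewDefaultContext(context.Background())
	client := datadog.NewAPIClient(datadog.NewConfiguration())
	api := datadogV2.NewUsageMeteringApi(client)

	// One call filtered by product family and a start timestamp replaces the
	// deprecated per-product v1 endpoints (product family value is a placeholder).
	resp, _, err := api.GetHourlyUsage(ctx, time.Now().AddDate(0, 0, -1), "infra_hosts")
	if err != nil {
		log.Fatalf("GetHourlyUsage failed: %v", err)
	}
	body, _ := json.MarshalIndent(resp, "", "  ")
	fmt.Println(string(body))
}
```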
@@ -1319,6 +1171,8 @@ func (r *GetUsageBillableSummaryOptionalParameters) WithMonth(month time.Time) *
// GetUsageBillableSummary Get billable usage across your account.
// Get billable usage across your account.
+//
+// This endpoint is only accessible for [parent-level organizations](https://docs.datadoghq.com/account_management/multi_organization/).
func (a *UsageMeteringApi) GetUsageBillableSummary(ctx _context.Context, o ...GetUsageBillableSummaryOptionalParameters) (UsageBillableSummaryResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -1417,7 +1271,9 @@ func (r *GetUsageCIAppOptionalParameters) WithEndHr(endHr time.Time) *GetUsageCI
// GetUsageCIApp Get hourly usage for CI visibility.
// Get hourly usage for CI visibility (tests, pipeline, and spans).
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageCIApp(ctx _context.Context, startHr time.Time, o ...GetUsageCIAppOptionalParameters) (UsageCIVisibilityResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -1517,7 +1373,9 @@ func (r *GetUsageCWSOptionalParameters) WithEndHr(endHr time.Time) *GetUsageCWSO
// GetUsageCWS Get hourly usage for cloud workload security.
// Get hourly usage for cloud workload security.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageCWS(ctx _context.Context, startHr time.Time, o ...GetUsageCWSOptionalParameters) (UsageCWSResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -1617,7 +1475,9 @@ func (r *GetUsageCloudSecurityPostureManagementOptionalParameters) WithEndHr(end
// GetUsageCloudSecurityPostureManagement Get hourly usage for CSM Pro.
// Get hourly usage for cloud security management (CSM) pro.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageCloudSecurityPostureManagement(ctx _context.Context, startHr time.Time, o ...GetUsageCloudSecurityPostureManagementOptionalParameters) (UsageCloudSecurityPostureManagementResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -1717,7 +1577,9 @@ func (r *GetUsageDBMOptionalParameters) WithEndHr(endHr time.Time) *GetUsageDBMO
// GetUsageDBM Get hourly usage for database monitoring.
// Get hourly usage for database monitoring
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageDBM(ctx _context.Context, startHr time.Time, o ...GetUsageDBMOptionalParameters) (UsageDBMResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -1817,7 +1679,9 @@ func (r *GetUsageFargateOptionalParameters) WithEndHr(endHr time.Time) *GetUsage
// GetUsageFargate Get hourly usage for Fargate.
// Get hourly usage for [Fargate](https://docs.datadoghq.com/integrations/ecs_fargate/).
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageFargate(ctx _context.Context, startHr time.Time, o ...GetUsageFargateOptionalParameters) (UsageFargateResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -1917,7 +1781,9 @@ func (r *GetUsageHostsOptionalParameters) WithEndHr(endHr time.Time) *GetUsageHo
// GetUsageHosts Get hourly usage for hosts and containers.
// Get hourly usage for hosts and containers.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageHosts(ctx _context.Context, startHr time.Time, o ...GetUsageHostsOptionalParameters) (UsageHostsResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -2017,7 +1883,9 @@ func (r *GetUsageIndexedSpansOptionalParameters) WithEndHr(endHr time.Time) *Get
// GetUsageIndexedSpans Get hourly usage for indexed spans.
// Get hourly usage for indexed spans.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageIndexedSpans(ctx _context.Context, startHr time.Time, o ...GetUsageIndexedSpansOptionalParameters) (UsageIndexedSpansResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -2117,7 +1985,9 @@ func (r *GetUsageInternetOfThingsOptionalParameters) WithEndHr(endHr time.Time)
// GetUsageInternetOfThings Get hourly usage for IoT.
// Get hourly usage for IoT.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageInternetOfThings(ctx _context.Context, startHr time.Time, o ...GetUsageInternetOfThingsOptionalParameters) (UsageIoTResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -2217,7 +2087,9 @@ func (r *GetUsageLambdaOptionalParameters) WithEndHr(endHr time.Time) *GetUsageL
// GetUsageLambda Get hourly usage for Lambda.
// Get hourly usage for Lambda.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageLambda(ctx _context.Context, startHr time.Time, o ...GetUsageLambdaOptionalParameters) (UsageLambdaResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -2317,7 +2189,9 @@ func (r *GetUsageLogsOptionalParameters) WithEndHr(endHr time.Time) *GetUsageLog
// GetUsageLogs Get hourly usage for logs.
// Get hourly usage for logs.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageLogs(ctx _context.Context, startHr time.Time, o ...GetUsageLogsOptionalParameters) (UsageLogsResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -2534,7 +2408,9 @@ func (r *GetUsageLogsByRetentionOptionalParameters) WithEndHr(endHr time.Time) *
// GetUsageLogsByRetention Get hourly logs usage by retention.
// Get hourly usage for indexed logs by retention period.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageLogsByRetention(ctx _context.Context, startHr time.Time, o ...GetUsageLogsByRetentionOptionalParameters) (UsageLogsByRetentionResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -2634,7 +2510,9 @@ func (r *GetUsageNetworkFlowsOptionalParameters) WithEndHr(endHr time.Time) *Get
// GetUsageNetworkFlows get hourly usage for network flows.
// Get hourly usage for network flows.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageNetworkFlows(ctx _context.Context, startHr time.Time, o ...GetUsageNetworkFlowsOptionalParameters) (UsageNetworkFlowsResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -2734,7 +2612,9 @@ func (r *GetUsageNetworkHostsOptionalParameters) WithEndHr(endHr time.Time) *Get
// GetUsageNetworkHosts Get hourly usage for network hosts.
// Get hourly usage for network hosts.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageNetworkHosts(ctx _context.Context, startHr time.Time, o ...GetUsageNetworkHostsOptionalParameters) (UsageNetworkHostsResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -2834,7 +2714,9 @@ func (r *GetUsageOnlineArchiveOptionalParameters) WithEndHr(endHr time.Time) *Ge
// GetUsageOnlineArchive Get hourly usage for online archive.
// Get hourly usage for online archive.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageOnlineArchive(ctx _context.Context, startHr time.Time, o ...GetUsageOnlineArchiveOptionalParameters) (UsageOnlineArchiveResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -2934,7 +2816,9 @@ func (r *GetUsageProfilingOptionalParameters) WithEndHr(endHr time.Time) *GetUsa
// GetUsageProfiling Get hourly usage for profiled hosts.
// Get hourly usage for profiled hosts.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageProfiling(ctx _context.Context, startHr time.Time, o ...GetUsageProfilingOptionalParameters) (UsageProfilingResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -3041,7 +2925,9 @@ func (r *GetUsageRumSessionsOptionalParameters) WithType(typeVar string) *GetUsa
// GetUsageRumSessions Get hourly usage for RUM sessions.
// Get hourly usage for [RUM](https://docs.datadoghq.com/real_user_monitoring/) Sessions.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageRumSessions(ctx _context.Context, startHr time.Time, o ...GetUsageRumSessionsOptionalParameters) (UsageRumSessionsResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -3144,7 +3030,9 @@ func (r *GetUsageRumUnitsOptionalParameters) WithEndHr(endHr time.Time) *GetUsag
// GetUsageRumUnits Get hourly usage for RUM units.
// Get hourly usage for [RUM](https://docs.datadoghq.com/real_user_monitoring/) Units.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageRumUnits(ctx _context.Context, startHr time.Time, o ...GetUsageRumUnitsOptionalParameters) (UsageRumUnitsResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -3244,7 +3132,9 @@ func (r *GetUsageSDSOptionalParameters) WithEndHr(endHr time.Time) *GetUsageSDSO
// GetUsageSDS Get hourly usage for sensitive data scanner.
// Get hourly usage for sensitive data scanner.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageSDS(ctx _context.Context, startHr time.Time, o ...GetUsageSDSOptionalParameters) (UsageSDSResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -3344,7 +3234,9 @@ func (r *GetUsageSNMPOptionalParameters) WithEndHr(endHr time.Time) *GetUsageSNM
// GetUsageSNMP Get hourly usage for SNMP devices.
// Get hourly usage for SNMP devices.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageSNMP(ctx _context.Context, startHr time.Time, o ...GetUsageSNMPOptionalParameters) (UsageSNMPResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -3451,6 +3343,8 @@ func (r *GetUsageSummaryOptionalParameters) WithIncludeOrgDetails(includeOrgDeta
// GetUsageSummary Get usage across your account.
// Get all usage across your account.
+//
+// This endpoint is only accessible for [parent-level organizations](https://docs.datadoghq.com/account_management/multi_organization/).
func (a *UsageMeteringApi) GetUsageSummary(ctx _context.Context, startMonth time.Time, o ...GetUsageSummaryOptionalParameters) (UsageSummaryResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -3553,7 +3447,7 @@ func (r *GetUsageSyntheticsOptionalParameters) WithEndHr(endHr time.Time) *GetUs
// GetUsageSynthetics Get hourly usage for synthetics checks.
// Get hourly usage for [synthetics checks](https://docs.datadoghq.com/synthetics/).
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
//
// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageSynthetics(ctx _context.Context, startHr time.Time, o ...GetUsageSyntheticsOptionalParameters) (UsageSyntheticsResponse, *_nethttp.Response, error) {
@@ -3655,7 +3549,9 @@ func (r *GetUsageSyntheticsAPIOptionalParameters) WithEndHr(endHr time.Time) *Ge
// GetUsageSyntheticsAPI Get hourly usage for synthetics API checks.
// Get hourly usage for [synthetics API checks](https://docs.datadoghq.com/synthetics/).
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageSyntheticsAPI(ctx _context.Context, startHr time.Time, o ...GetUsageSyntheticsAPIOptionalParameters) (UsageSyntheticsAPIResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -3755,7 +3651,9 @@ func (r *GetUsageSyntheticsBrowserOptionalParameters) WithEndHr(endHr time.Time)
// GetUsageSyntheticsBrowser Get hourly usage for synthetics browser checks.
// Get hourly usage for synthetics browser checks.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageSyntheticsBrowser(ctx _context.Context, startHr time.Time, o ...GetUsageSyntheticsBrowserOptionalParameters) (UsageSyntheticsBrowserResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -3855,7 +3753,9 @@ func (r *GetUsageTimeseriesOptionalParameters) WithEndHr(endHr time.Time) *GetUs
// GetUsageTimeseries Get hourly usage for custom metrics.
// Get hourly usage for [custom metrics](https://docs.datadoghq.com/developers/metrics/custom_metrics/).
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family). Refer to [Migrating from the V1 Hourly Usage APIs to V2](https://docs.datadoghq.com/account_management/guide/hourly-usage-migration/) for the associated migration guide.
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageTimeseries(ctx _context.Context, startHr time.Time, o ...GetUsageTimeseriesOptionalParameters) (UsageTimeseriesResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
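// Illustrative sketch (not applied by this patch): the deprecation comments above all point callers
// at the V2 "Get hourly usage by product family" endpoint. The snippet below shows one plausible
// migration path from the deprecated datadogV1 GetUsageRumUnits call. It assumes the datadogV2
// UsageMeteringApi.GetHourlyUsage signature (ctx, filterTimestampStart, filterProductFamilies,
// optional parameters), the "rum" product family name, and the WithFilterTimestampEnd option;
// verify these against the datadogV2 package before relying on them. Import paths follow the
// vendor tree as rendered in this diff.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	// NewDefaultContext picks up DD_API_KEY / DD_APP_KEY from the environment.
	ctx := datadog.NewDefaultContext(context.Background())
	client := datadog.NewAPIClient(datadog.NewConfiguration())
	api := datadogV2.NewUsageMeteringApi(client)

	// V2 replacement for the deprecated V1 RUM Units endpoint: request the "rum" product family
	// for the last 24 hours.
	resp, _, err := api.GetHourlyUsage(ctx, time.Now().AddDate(0, 0, -1), "rum",
		*datadogV2.NewGetHourlyUsageOptionalParameters().WithFilterTimestampEnd(time.Now()))
	if err != nil {
		fmt.Println("error querying hourly usage:", err)
		return
	}
	fmt.Printf("returned %d hourly usage entries\n", len(resp.GetData()))
}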
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/doc.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/doc.go
index 85245595a3..9d8b94fbd9 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/doc.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/doc.go
@@ -191,7 +191,6 @@
// - [UsageMeteringApi.GetSpecifiedDailyCustomReports]
// - [UsageMeteringApi.GetSpecifiedMonthlyCustomReports]
// - [UsageMeteringApi.GetUsageAnalyzedLogs]
-// - [UsageMeteringApi.GetUsageAttribution]
// - [UsageMeteringApi.GetUsageAuditLogs]
// - [UsageMeteringApi.GetUsageBillableSummary]
// - [UsageMeteringApi.GetUsageCIApp]
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_aws_account.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_aws_account.go
index a4dfa7b494..609d56c502 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_aws_account.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_aws_account.go
@@ -23,6 +23,8 @@ type AWSAccount struct {
// An array of [AWS regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints)
// to exclude from metrics collection.
ExcludedRegions []string `json:"excluded_regions,omitempty"`
+ // Whether Datadog collects additional attributes and configuration information about the resources in your AWS account. Required for `cspm_resource_collection`.
+ ExtendedResourceCollectionEnabled *bool `json:"extended_resource_collection_enabled,omitempty"`
// The array of EC2 tags (in the form `key:value`) defines a filter that Datadog uses when collecting metrics from EC2.
// Wildcards, such as `?` (for single characters) and `*` (for multiple characters) can also be used.
// Only hosts that match one of the defined tags
@@ -35,7 +37,8 @@ type AWSAccount struct {
HostTags []string `json:"host_tags,omitempty"`
// Whether Datadog collects metrics for this AWS account.
MetricsCollectionEnabled *bool `json:"metrics_collection_enabled,omitempty"`
- // Whether Datadog collects a standard set of resources from your AWS account.
+ // Deprecated in favor of 'extended_resource_collection_enabled'. Whether Datadog collects a standard set of resources from your AWS account.
+ // Deprecated
ResourceCollectionEnabled *bool `json:"resource_collection_enabled,omitempty"`
// Your Datadog role delegation name.
RoleName *string `json:"role_name,omitempty"`
@@ -54,6 +57,8 @@ func NewAWSAccount() *AWSAccount {
this := AWSAccount{}
var cspmResourceCollectionEnabled bool = false
this.CspmResourceCollectionEnabled = &cspmResourceCollectionEnabled
+ var extendedResourceCollectionEnabled bool = false
+ this.ExtendedResourceCollectionEnabled = &extendedResourceCollectionEnabled
var metricsCollectionEnabled bool = true
this.MetricsCollectionEnabled = &metricsCollectionEnabled
var resourceCollectionEnabled bool = false
@@ -68,6 +73,8 @@ func NewAWSAccountWithDefaults() *AWSAccount {
this := AWSAccount{}
var cspmResourceCollectionEnabled bool = false
this.CspmResourceCollectionEnabled = &cspmResourceCollectionEnabled
+ var extendedResourceCollectionEnabled bool = false
+ this.ExtendedResourceCollectionEnabled = &extendedResourceCollectionEnabled
var metricsCollectionEnabled bool = true
this.MetricsCollectionEnabled = &metricsCollectionEnabled
var resourceCollectionEnabled bool = false
@@ -215,6 +222,34 @@ func (o *AWSAccount) SetExcludedRegions(v []string) {
o.ExcludedRegions = v
}
+// GetExtendedResourceCollectionEnabled returns the ExtendedResourceCollectionEnabled field value if set, zero value otherwise.
+func (o *AWSAccount) GetExtendedResourceCollectionEnabled() bool {
+ if o == nil || o.ExtendedResourceCollectionEnabled == nil {
+ var ret bool
+ return ret
+ }
+ return *o.ExtendedResourceCollectionEnabled
+}
+
+// GetExtendedResourceCollectionEnabledOk returns a tuple with the ExtendedResourceCollectionEnabled field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *AWSAccount) GetExtendedResourceCollectionEnabledOk() (*bool, bool) {
+ if o == nil || o.ExtendedResourceCollectionEnabled == nil {
+ return nil, false
+ }
+ return o.ExtendedResourceCollectionEnabled, true
+}
+
+// HasExtendedResourceCollectionEnabled returns a boolean if a field has been set.
+func (o *AWSAccount) HasExtendedResourceCollectionEnabled() bool {
+ return o != nil && o.ExtendedResourceCollectionEnabled != nil
+}
+
+// SetExtendedResourceCollectionEnabled gets a reference to the given bool and assigns it to the ExtendedResourceCollectionEnabled field.
+func (o *AWSAccount) SetExtendedResourceCollectionEnabled(v bool) {
+ o.ExtendedResourceCollectionEnabled = &v
+}
+
// GetFilterTags returns the FilterTags field value if set, zero value otherwise.
func (o *AWSAccount) GetFilterTags() []string {
if o == nil || o.FilterTags == nil {
@@ -300,6 +335,7 @@ func (o *AWSAccount) SetMetricsCollectionEnabled(v bool) {
}
// GetResourceCollectionEnabled returns the ResourceCollectionEnabled field value if set, zero value otherwise.
+// Deprecated
func (o *AWSAccount) GetResourceCollectionEnabled() bool {
if o == nil || o.ResourceCollectionEnabled == nil {
var ret bool
@@ -310,6 +346,7 @@ func (o *AWSAccount) GetResourceCollectionEnabled() bool {
// GetResourceCollectionEnabledOk returns a tuple with the ResourceCollectionEnabled field value if set, nil otherwise
// and a boolean to check if the value has been set.
+// Deprecated
func (o *AWSAccount) GetResourceCollectionEnabledOk() (*bool, bool) {
if o == nil || o.ResourceCollectionEnabled == nil {
return nil, false
@@ -323,6 +360,7 @@ func (o *AWSAccount) HasResourceCollectionEnabled() bool {
}
// SetResourceCollectionEnabled gets a reference to the given bool and assigns it to the ResourceCollectionEnabled field.
+// Deprecated
func (o *AWSAccount) SetResourceCollectionEnabled(v bool) {
o.ResourceCollectionEnabled = &v
}
@@ -404,6 +442,9 @@ func (o AWSAccount) MarshalJSON() ([]byte, error) {
if o.ExcludedRegions != nil {
toSerialize["excluded_regions"] = o.ExcludedRegions
}
+ if o.ExtendedResourceCollectionEnabled != nil {
+ toSerialize["extended_resource_collection_enabled"] = o.ExtendedResourceCollectionEnabled
+ }
if o.FilterTags != nil {
toSerialize["filter_tags"] = o.FilterTags
}
@@ -432,24 +473,25 @@ func (o AWSAccount) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the given payload.
func (o *AWSAccount) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- AccessKeyId *string `json:"access_key_id,omitempty"`
- AccountId *string `json:"account_id,omitempty"`
- AccountSpecificNamespaceRules map[string]bool `json:"account_specific_namespace_rules,omitempty"`
- CspmResourceCollectionEnabled *bool `json:"cspm_resource_collection_enabled,omitempty"`
- ExcludedRegions []string `json:"excluded_regions,omitempty"`
- FilterTags []string `json:"filter_tags,omitempty"`
- HostTags []string `json:"host_tags,omitempty"`
- MetricsCollectionEnabled *bool `json:"metrics_collection_enabled,omitempty"`
- ResourceCollectionEnabled *bool `json:"resource_collection_enabled,omitempty"`
- RoleName *string `json:"role_name,omitempty"`
- SecretAccessKey *string `json:"secret_access_key,omitempty"`
+ AccessKeyId *string `json:"access_key_id,omitempty"`
+ AccountId *string `json:"account_id,omitempty"`
+ AccountSpecificNamespaceRules map[string]bool `json:"account_specific_namespace_rules,omitempty"`
+ CspmResourceCollectionEnabled *bool `json:"cspm_resource_collection_enabled,omitempty"`
+ ExcludedRegions []string `json:"excluded_regions,omitempty"`
+ ExtendedResourceCollectionEnabled *bool `json:"extended_resource_collection_enabled,omitempty"`
+ FilterTags []string `json:"filter_tags,omitempty"`
+ HostTags []string `json:"host_tags,omitempty"`
+ MetricsCollectionEnabled *bool `json:"metrics_collection_enabled,omitempty"`
+ ResourceCollectionEnabled *bool `json:"resource_collection_enabled,omitempty"`
+ RoleName *string `json:"role_name,omitempty"`
+ SecretAccessKey *string `json:"secret_access_key,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"access_key_id", "account_id", "account_specific_namespace_rules", "cspm_resource_collection_enabled", "excluded_regions", "filter_tags", "host_tags", "metrics_collection_enabled", "resource_collection_enabled", "role_name", "secret_access_key"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"access_key_id", "account_id", "account_specific_namespace_rules", "cspm_resource_collection_enabled", "excluded_regions", "extended_resource_collection_enabled", "filter_tags", "host_tags", "metrics_collection_enabled", "resource_collection_enabled", "role_name", "secret_access_key"})
} else {
return err
}
@@ -458,6 +500,7 @@ func (o *AWSAccount) UnmarshalJSON(bytes []byte) (err error) {
o.AccountSpecificNamespaceRules = all.AccountSpecificNamespaceRules
o.CspmResourceCollectionEnabled = all.CspmResourceCollectionEnabled
o.ExcludedRegions = all.ExcludedRegions
+ o.ExtendedResourceCollectionEnabled = all.ExtendedResourceCollectionEnabled
o.FilterTags = all.FilterTags
o.HostTags = all.HostTags
o.MetricsCollectionEnabled = all.MetricsCollectionEnabled
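// Illustrative sketch (not applied by this patch): caller-side use of the new
// extended_resource_collection_enabled field wired in above. The setter and getter names come from
// this file and the generated AWSAccount model; the account ID and role name are placeholder values.
package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
)

func exampleAWSAccount() *datadogV1.AWSAccount {
	account := datadogV1.NewAWSAccount()
	account.SetAccountId("123456789012")               // placeholder AWS account ID
	account.SetRoleName("DatadogIntegrationRole")      // placeholder role name
	account.SetExtendedResourceCollectionEnabled(true) // new flag; required for cspm_resource_collection per the field comment above
	// ResourceCollectionEnabled still exists but is now marked Deprecated in favor of the flag above.
	return account
}

func main() {
	account := exampleAWSAccount()
	fmt.Println(account.GetExtendedResourceCollectionEnabled()) // true
}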
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_hourly_usage_attribution_usage_type.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_hourly_usage_attribution_usage_type.go
index 8a596edf93..d546c367db 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_hourly_usage_attribution_usage_type.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_hourly_usage_attribution_usage_type.go
@@ -15,72 +15,75 @@ type HourlyUsageAttributionUsageType string
// List of HourlyUsageAttributionUsageType.
const (
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_API_USAGE HourlyUsageAttributionUsageType = "api_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_APM_FARGATE_USAGE HourlyUsageAttributionUsageType = "apm_fargate_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_APM_HOST_USAGE HourlyUsageAttributionUsageType = "apm_host_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_APM_USM_USAGE HourlyUsageAttributionUsageType = "apm_usm_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_APPSEC_FARGATE_USAGE HourlyUsageAttributionUsageType = "appsec_fargate_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_APPSEC_USAGE HourlyUsageAttributionUsageType = "appsec_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_ASM_SERVERLESS_TRACED_INVOCATIONS_USAGE HourlyUsageAttributionUsageType = "asm_serverless_traced_invocations_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_ASM_SERVERLESS_TRACED_INVOCATIONS_PERCENTAGE HourlyUsageAttributionUsageType = "asm_serverless_traced_invocations_percentage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_BROWSER_USAGE HourlyUsageAttributionUsageType = "browser_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CI_PIPELINE_INDEXED_SPANS_USAGE HourlyUsageAttributionUsageType = "ci_pipeline_indexed_spans_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CI_TEST_INDEXED_SPANS_USAGE HourlyUsageAttributionUsageType = "ci_test_indexed_spans_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CI_VISIBILITY_ITR_USAGE HourlyUsageAttributionUsageType = "ci_visibility_itr_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CLOUD_SIEM_USAGE HourlyUsageAttributionUsageType = "cloud_siem_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CONTAINER_EXCL_AGENT_USAGE HourlyUsageAttributionUsageType = "container_excl_agent_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CONTAINER_USAGE HourlyUsageAttributionUsageType = "container_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CSPM_CONTAINERS_USAGE HourlyUsageAttributionUsageType = "cspm_containers_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CSPM_HOSTS_USAGE HourlyUsageAttributionUsageType = "cspm_hosts_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CUSTOM_EVENT_USAGE HourlyUsageAttributionUsageType = "custom_event_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CUSTOM_INGESTED_TIMESERIES_USAGE HourlyUsageAttributionUsageType = "custom_ingested_timeseries_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CUSTOM_TIMESERIES_USAGE HourlyUsageAttributionUsageType = "custom_timeseries_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CWS_CONTAINERS_USAGE HourlyUsageAttributionUsageType = "cws_containers_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_CWS_HOSTS_USAGE HourlyUsageAttributionUsageType = "cws_hosts_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_DBM_HOSTS_USAGE HourlyUsageAttributionUsageType = "dbm_hosts_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_DBM_QUERIES_USAGE HourlyUsageAttributionUsageType = "dbm_queries_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_ERROR_TRACKING_USAGE HourlyUsageAttributionUsageType = "error_tracking_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_ERROR_TRACKING_PERCENTAGE HourlyUsageAttributionUsageType = "error_tracking_percentage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_INDEXED_LOGS_USAGE HourlyUsageAttributionUsageType = "estimated_indexed_logs_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_INDEXED_SPANS_USAGE HourlyUsageAttributionUsageType = "estimated_indexed_spans_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_INGESTED_LOGS_USAGE HourlyUsageAttributionUsageType = "estimated_ingested_logs_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_INGESTED_SPANS_USAGE HourlyUsageAttributionUsageType = "estimated_ingested_spans_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_RUM_SESSIONS_USAGE HourlyUsageAttributionUsageType = "estimated_rum_sessions_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_FARGATE_USAGE HourlyUsageAttributionUsageType = "fargate_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_FUNCTIONS_USAGE HourlyUsageAttributionUsageType = "functions_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_INDEXED_SPANS_USAGE HourlyUsageAttributionUsageType = "indexed_spans_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_INFRA_HOST_USAGE HourlyUsageAttributionUsageType = "infra_host_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_INGESTED_LOGS_BYTES_USAGE HourlyUsageAttributionUsageType = "ingested_logs_bytes_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_INGESTED_SPANS_BYTES_USAGE HourlyUsageAttributionUsageType = "ingested_spans_bytes_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_INVOCATIONS_USAGE HourlyUsageAttributionUsageType = "invocations_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LAMBDA_TRACED_INVOCATIONS_USAGE HourlyUsageAttributionUsageType = "lambda_traced_invocations_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_15DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_15day_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_180DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_180day_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_30DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_30day_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_360DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_360day_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_3DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_3day_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_45DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_45day_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_60DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_60day_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_7DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_7day_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_90DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_90day_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_CUSTOM_RETENTION_USAGE HourlyUsageAttributionUsageType = "logs_indexed_custom_retention_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_MOBILE_APP_TESTING_USAGE HourlyUsageAttributionUsageType = "mobile_app_testing_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_NDM_NETFLOW_USAGE HourlyUsageAttributionUsageType = "ndm_netflow_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_NPM_HOST_USAGE HourlyUsageAttributionUsageType = "npm_host_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_OBS_PIPELINE_BYTES_USAGE HourlyUsageAttributionUsageType = "obs_pipeline_bytes_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_OBS_PIPELINE_VCPU_USAGE HourlyUsageAttributionUsageType = "obs_pipelines_vcpu_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_PROFILED_CONTAINER_USAGE HourlyUsageAttributionUsageType = "profiled_container_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_PROFILED_FARGATE_USAGE HourlyUsageAttributionUsageType = "profiled_fargate_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_PROFILED_HOST_USAGE HourlyUsageAttributionUsageType = "profiled_host_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_RUM_BROWSER_MOBILE_SESSIONS_USAGE HourlyUsageAttributionUsageType = "rum_browser_mobile_sessions_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_RUM_REPLAY_SESSIONS_USAGE HourlyUsageAttributionUsageType = "rum_replay_sessions_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_SDS_SCANNED_BYTES_USAGE HourlyUsageAttributionUsageType = "sds_scanned_bytes_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_SERVERLESS_APPS_USAGE HourlyUsageAttributionUsageType = "serverless_apps_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_SIEM_INGESTED_BYTES_USAGE HourlyUsageAttributionUsageType = "siem_ingested_bytes_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_SNMP_USAGE HourlyUsageAttributionUsageType = "snmp_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_UNIVERSAL_SERVICE_MONITORING_USAGE HourlyUsageAttributionUsageType = "universal_service_monitoring_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_VULN_MANAGEMENT_HOSTS_USAGE HourlyUsageAttributionUsageType = "vuln_management_hosts_usage"
- HOURLYUSAGEATTRIBUTIONUSAGETYPE_WORKFLOW_EXECUTIONS_USAGE HourlyUsageAttributionUsageType = "workflow_executions_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_API_USAGE HourlyUsageAttributionUsageType = "api_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_APM_FARGATE_USAGE HourlyUsageAttributionUsageType = "apm_fargate_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_APM_HOST_USAGE HourlyUsageAttributionUsageType = "apm_host_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_APM_USM_USAGE HourlyUsageAttributionUsageType = "apm_usm_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_APPSEC_FARGATE_USAGE HourlyUsageAttributionUsageType = "appsec_fargate_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_APPSEC_USAGE HourlyUsageAttributionUsageType = "appsec_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ASM_SERVERLESS_TRACED_INVOCATIONS_USAGE HourlyUsageAttributionUsageType = "asm_serverless_traced_invocations_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ASM_SERVERLESS_TRACED_INVOCATIONS_PERCENTAGE HourlyUsageAttributionUsageType = "asm_serverless_traced_invocations_percentage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_BROWSER_USAGE HourlyUsageAttributionUsageType = "browser_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CI_PIPELINE_INDEXED_SPANS_USAGE HourlyUsageAttributionUsageType = "ci_pipeline_indexed_spans_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CI_TEST_INDEXED_SPANS_USAGE HourlyUsageAttributionUsageType = "ci_test_indexed_spans_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CI_VISIBILITY_ITR_USAGE HourlyUsageAttributionUsageType = "ci_visibility_itr_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CLOUD_SIEM_USAGE HourlyUsageAttributionUsageType = "cloud_siem_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CONTAINER_EXCL_AGENT_USAGE HourlyUsageAttributionUsageType = "container_excl_agent_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CONTAINER_USAGE HourlyUsageAttributionUsageType = "container_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CSPM_CONTAINERS_USAGE HourlyUsageAttributionUsageType = "cspm_containers_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CSPM_HOSTS_USAGE HourlyUsageAttributionUsageType = "cspm_hosts_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CUSTOM_EVENT_USAGE HourlyUsageAttributionUsageType = "custom_event_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CUSTOM_INGESTED_TIMESERIES_USAGE HourlyUsageAttributionUsageType = "custom_ingested_timeseries_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CUSTOM_TIMESERIES_USAGE HourlyUsageAttributionUsageType = "custom_timeseries_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CWS_CONTAINERS_USAGE HourlyUsageAttributionUsageType = "cws_containers_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_CWS_HOSTS_USAGE HourlyUsageAttributionUsageType = "cws_hosts_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_DBM_HOSTS_USAGE HourlyUsageAttributionUsageType = "dbm_hosts_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_DBM_QUERIES_USAGE HourlyUsageAttributionUsageType = "dbm_queries_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ERROR_TRACKING_USAGE HourlyUsageAttributionUsageType = "error_tracking_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ERROR_TRACKING_PERCENTAGE HourlyUsageAttributionUsageType = "error_tracking_percentage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_INDEXED_LOGS_USAGE HourlyUsageAttributionUsageType = "estimated_indexed_logs_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_INDEXED_SPANS_USAGE HourlyUsageAttributionUsageType = "estimated_indexed_spans_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_INGESTED_LOGS_USAGE HourlyUsageAttributionUsageType = "estimated_ingested_logs_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_INGESTED_SPANS_USAGE HourlyUsageAttributionUsageType = "estimated_ingested_spans_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_RUM_SESSIONS_USAGE HourlyUsageAttributionUsageType = "estimated_rum_sessions_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_FARGATE_USAGE HourlyUsageAttributionUsageType = "fargate_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_FUNCTIONS_USAGE HourlyUsageAttributionUsageType = "functions_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_INCIDENT_MANAGEMENT_MONTHLY_ACTIVE_USERS_USAGE HourlyUsageAttributionUsageType = "incident_management_monthly_active_users_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_INDEXED_SPANS_USAGE HourlyUsageAttributionUsageType = "indexed_spans_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_INFRA_HOST_USAGE HourlyUsageAttributionUsageType = "infra_host_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_INGESTED_LOGS_BYTES_USAGE HourlyUsageAttributionUsageType = "ingested_logs_bytes_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_INGESTED_SPANS_BYTES_USAGE HourlyUsageAttributionUsageType = "ingested_spans_bytes_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_INVOCATIONS_USAGE HourlyUsageAttributionUsageType = "invocations_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LAMBDA_TRACED_INVOCATIONS_USAGE HourlyUsageAttributionUsageType = "lambda_traced_invocations_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_15DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_15day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_180DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_180day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_1DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_1day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_30DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_30day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_360DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_360day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_3DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_3day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_45DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_45day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_60DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_60day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_7DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_7day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_90DAY_USAGE HourlyUsageAttributionUsageType = "logs_indexed_90day_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_CUSTOM_RETENTION_USAGE HourlyUsageAttributionUsageType = "logs_indexed_custom_retention_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_MOBILE_APP_TESTING_USAGE HourlyUsageAttributionUsageType = "mobile_app_testing_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_NDM_NETFLOW_USAGE HourlyUsageAttributionUsageType = "ndm_netflow_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_NPM_HOST_USAGE HourlyUsageAttributionUsageType = "npm_host_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_OBS_PIPELINE_BYTES_USAGE HourlyUsageAttributionUsageType = "obs_pipeline_bytes_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_OBS_PIPELINE_VCPU_USAGE HourlyUsageAttributionUsageType = "obs_pipelines_vcpu_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ONLINE_ARCHIVE_USAGE HourlyUsageAttributionUsageType = "online_archive_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_PROFILED_CONTAINER_USAGE HourlyUsageAttributionUsageType = "profiled_container_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_PROFILED_FARGATE_USAGE HourlyUsageAttributionUsageType = "profiled_fargate_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_PROFILED_HOST_USAGE HourlyUsageAttributionUsageType = "profiled_host_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_RUM_BROWSER_MOBILE_SESSIONS_USAGE HourlyUsageAttributionUsageType = "rum_browser_mobile_sessions_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_RUM_REPLAY_SESSIONS_USAGE HourlyUsageAttributionUsageType = "rum_replay_sessions_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_SDS_SCANNED_BYTES_USAGE HourlyUsageAttributionUsageType = "sds_scanned_bytes_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_SERVERLESS_APPS_USAGE HourlyUsageAttributionUsageType = "serverless_apps_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_SIEM_INGESTED_BYTES_USAGE HourlyUsageAttributionUsageType = "siem_ingested_bytes_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_SNMP_USAGE HourlyUsageAttributionUsageType = "snmp_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_UNIVERSAL_SERVICE_MONITORING_USAGE HourlyUsageAttributionUsageType = "universal_service_monitoring_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_VULN_MANAGEMENT_HOSTS_USAGE HourlyUsageAttributionUsageType = "vuln_management_hosts_usage"
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_WORKFLOW_EXECUTIONS_USAGE HourlyUsageAttributionUsageType = "workflow_executions_usage"
)
var allowedHourlyUsageAttributionUsageTypeEnumValues = []HourlyUsageAttributionUsageType{
@@ -117,6 +120,7 @@ var allowedHourlyUsageAttributionUsageTypeEnumValues = []HourlyUsageAttributionU
HOURLYUSAGEATTRIBUTIONUSAGETYPE_ESTIMATED_RUM_SESSIONS_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_FARGATE_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_FUNCTIONS_USAGE,
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_INCIDENT_MANAGEMENT_MONTHLY_ACTIVE_USERS_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_INDEXED_SPANS_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_INFRA_HOST_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_INGESTED_LOGS_BYTES_USAGE,
@@ -125,6 +129,7 @@ var allowedHourlyUsageAttributionUsageTypeEnumValues = []HourlyUsageAttributionU
HOURLYUSAGEATTRIBUTIONUSAGETYPE_LAMBDA_TRACED_INVOCATIONS_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_15DAY_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_180DAY_USAGE,
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_1DAY_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_30DAY_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_360DAY_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_LOGS_INDEXED_3DAY_USAGE,
@@ -138,6 +143,7 @@ var allowedHourlyUsageAttributionUsageTypeEnumValues = []HourlyUsageAttributionU
HOURLYUSAGEATTRIBUTIONUSAGETYPE_NPM_HOST_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_OBS_PIPELINE_BYTES_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_OBS_PIPELINE_VCPU_USAGE,
+ HOURLYUSAGEATTRIBUTIONUSAGETYPE_ONLINE_ARCHIVE_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_PROFILED_CONTAINER_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_PROFILED_FARGATE_USAGE,
HOURLYUSAGEATTRIBUTIONUSAGETYPE_PROFILED_HOST_USAGE,
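// Illustrative sketch (not applied by this patch): the constants added above follow the generated
// enum pattern in this client, so a string value returned by the API can be round-tripped through
// the enum. The NewHourlyUsageAttributionUsageTypeFromValue constructor and IsValid method are
// assumed from that pattern; confirm them in the generated file before use.
package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
)

func main() {
	// One of the values newly added to the allowed list in this change.
	ut, err := datadogV1.NewHourlyUsageAttributionUsageTypeFromValue("incident_management_monthly_active_users_usage")
	if err != nil {
		fmt.Println("value not in the allowed enum list:", err)
		return
	}
	fmt.Println(ut.IsValid(), *ut)
}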
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monitor_options.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monitor_options.go
index 43697ffd85..d2176845b0 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monitor_options.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monitor_options.go
@@ -92,10 +92,13 @@ type MonitorOptions struct {
// The number of times re-notification messages should be sent on the current status at the provided re-notification interval.
RenotifyOccurrences datadog.NullableInt64 `json:"renotify_occurrences,omitempty"`
// The types of monitor statuses for which re-notification messages are sent.
+ // Default: **null** if `renotify_interval` is **null**.
+ // If `renotify_interval` is set, defaults to renotify on `Alert` and `No Data`.
RenotifyStatuses []MonitorRenotifyStatusType `json:"renotify_statuses,omitempty"`
// A Boolean indicating whether this monitor needs a full window of data before it’s evaluated.
// We highly recommend you set this to `false` for sparse metrics,
- // otherwise some evaluations are skipped. Default is false.
+ // otherwise some evaluations are skipped. Default is false. This setting only applies to
+ // metric monitors.
RequireFullWindow *bool `json:"require_full_window,omitempty"`
// Configuration options for scheduling.
SchedulingOptions *MonitorOptionsSchedulingOptions `json:"scheduling_options,omitempty"`
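// Illustrative sketch (not applied by this patch): the renotify defaults documented above, made
// explicit on a MonitorOptions value. Constructor, setter, and constant names follow the generated
// datadogV1 API and should be checked against that package; the 60-minute interval is illustrative.
package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
)

func main() {
	opts := datadogV1.NewMonitorOptionsWithDefaults()
	// Per the comment above, once renotify_interval is set, renotify_statuses defaults to Alert
	// and No Data; setting them explicitly keeps the intended behaviour visible in code.
	opts.SetRenotifyInterval(60) // minutes between re-notifications
	opts.SetRenotifyStatuses([]datadogV1.MonitorRenotifyStatusType{
		datadogV1.MONITORRENOTIFYSTATUSTYPE_ALERT,
		datadogV1.MONITORRENOTIFYSTATUSTYPE_NO_DATA,
	})
	fmt.Println(opts.GetRenotifyStatuses())
}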
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monthly_usage_attribution_supported_metrics.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monthly_usage_attribution_supported_metrics.go
index d4dfce6107..35b60c86ea 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monthly_usage_attribution_supported_metrics.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monthly_usage_attribution_supported_metrics.go
@@ -15,135 +15,141 @@ type MonthlyUsageAttributionSupportedMetrics string
// List of MonthlyUsageAttributionSupportedMetrics.
const (
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_API_USAGE MonthlyUsageAttributionSupportedMetrics = "api_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_API_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "api_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_FARGATE_USAGE MonthlyUsageAttributionSupportedMetrics = "apm_fargate_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_FARGATE_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "apm_fargate_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_FARGATE_USAGE MonthlyUsageAttributionSupportedMetrics = "appsec_fargate_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_FARGATE_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "appsec_fargate_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_HOST_USAGE MonthlyUsageAttributionSupportedMetrics = "apm_host_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_HOST_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "apm_host_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_USM_USAGE MonthlyUsageAttributionSupportedMetrics = "apm_usm_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_USM_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "apm_usm_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_USAGE MonthlyUsageAttributionSupportedMetrics = "appsec_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "appsec_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ASM_SERVERLESS_TRACED_INVOCATIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "asm_serverless_traced_invocations_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ASM_SERVERLESS_TRACED_INVOCATIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "asm_serverless_traced_invocations_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_BROWSER_USAGE MonthlyUsageAttributionSupportedMetrics = "browser_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_BROWSER_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "browser_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_VISIBILITY_ITR_USAGE MonthlyUsageAttributionSupportedMetrics = "ci_visibility_itr_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_VISIBILITY_ITR_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ci_visibility_itr_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CLOUD_SIEM_USAGE MonthlyUsageAttributionSupportedMetrics = "cloud_siem_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CLOUD_SIEM_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cloud_siem_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_EXCL_AGENT_USAGE MonthlyUsageAttributionSupportedMetrics = "container_excl_agent_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_EXCL_AGENT_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "container_excl_agent_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_USAGE MonthlyUsageAttributionSupportedMetrics = "container_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "container_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CSPM_CONTAINERS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cspm_containers_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CSPM_CONTAINERS_USAGE MonthlyUsageAttributionSupportedMetrics = "cspm_containers_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CSPM_HOSTS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cspm_hosts_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CSPM_HOSTS_USAGE MonthlyUsageAttributionSupportedMetrics = "cspm_hosts_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_TIMESERIES_USAGE MonthlyUsageAttributionSupportedMetrics = "custom_timeseries_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_TIMESERIES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "custom_timeseries_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_INGESTED_TIMESERIES_USAGE MonthlyUsageAttributionSupportedMetrics = "custom_ingested_timeseries_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_INGESTED_TIMESERIES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "custom_ingested_timeseries_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CWS_CONTAINERS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cws_containers_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CWS_CONTAINERS_USAGE MonthlyUsageAttributionSupportedMetrics = "cws_containers_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CWS_HOSTS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cws_hosts_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CWS_HOSTS_USAGE MonthlyUsageAttributionSupportedMetrics = "cws_hosts_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_HOSTS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "dbm_hosts_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_HOSTS_USAGE MonthlyUsageAttributionSupportedMetrics = "dbm_hosts_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_QUERIES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "dbm_queries_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_QUERIES_USAGE MonthlyUsageAttributionSupportedMetrics = "dbm_queries_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ERROR_TRACKING_USAGE MonthlyUsageAttributionSupportedMetrics = "error_tracking_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ERROR_TRACKING_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "error_tracking_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_LOGS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_indexed_logs_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_LOGS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_indexed_logs_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_LOGS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_ingested_logs_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_LOGS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_ingested_logs_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_indexed_spans_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_indexed_spans_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_ingested_spans_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_ingested_spans_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FARGATE_USAGE MonthlyUsageAttributionSupportedMetrics = "fargate_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FARGATE_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "fargate_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FUNCTIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "functions_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FUNCTIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "functions_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_USAGE MonthlyUsageAttributionSupportedMetrics = "infra_host_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "infra_host_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INVOCATIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "invocations_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INVOCATIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "invocations_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_TRACED_INVOCATIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "lambda_traced_invocations_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_TRACED_INVOCATIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "lambda_traced_invocations_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_MOBILE_APP_TESTING_USAGE MonthlyUsageAttributionSupportedMetrics = "mobile_app_testing_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_MOBILE_APP_TESTING_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "mobile_app_testing_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_NDM_NETFLOW_USAGE MonthlyUsageAttributionSupportedMetrics = "ndm_netflow_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_NDM_NETFLOW_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ndm_netflow_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_NPM_HOST_USAGE MonthlyUsageAttributionSupportedMetrics = "npm_host_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_NPM_HOST_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "npm_host_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINE_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "obs_pipeline_bytes_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINE_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "obs_pipeline_bytes_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINES_VCPU_USAGE MonthlyUsageAttributionSupportedMetrics = "obs_pipelines_vcpu_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINES_VCPU_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "obs_pipelines_vcpu_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_USAGE MonthlyUsageAttributionSupportedMetrics = "profiled_container_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "profiled_container_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_FARGATE_USAGE MonthlyUsageAttributionSupportedMetrics = "profiled_fargate_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_FARGATE_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "profiled_fargate_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_HOST_USAGE MonthlyUsageAttributionSupportedMetrics = "profiled_host_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_HOST_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "profiled_host_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SERVERLESS_APPS_USAGE MonthlyUsageAttributionSupportedMetrics = "serverless_apps_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SERVERLESS_APPS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "serverless_apps_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SNMP_USAGE MonthlyUsageAttributionSupportedMetrics = "snmp_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SNMP_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "snmp_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_RUM_SESSIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_rum_sessions_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_RUM_SESSIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_rum_sessions_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_UNIVERSAL_SERVICE_MONITORING_USAGE MonthlyUsageAttributionSupportedMetrics = "universal_service_monitoring_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_UNIVERSAL_SERVICE_MONITORING_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "universal_service_monitoring_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_VULN_MANAGEMENT_HOSTS_USAGE MonthlyUsageAttributionSupportedMetrics = "vuln_management_hosts_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_VULN_MANAGEMENT_HOSTS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "vuln_management_hosts_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SDS_SCANNED_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "sds_scanned_bytes_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SDS_SCANNED_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "sds_scanned_bytes_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_TEST_INDEXED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "ci_test_indexed_spans_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_TEST_INDEXED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ci_test_indexed_spans_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INGESTED_LOGS_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "ingested_logs_bytes_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INGESTED_LOGS_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ingested_logs_bytes_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_PIPELINE_INDEXED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "ci_pipeline_indexed_spans_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_PIPELINE_INDEXED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ci_pipeline_indexed_spans_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INDEXED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "indexed_spans_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INDEXED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "indexed_spans_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_EVENT_USAGE MonthlyUsageAttributionSupportedMetrics = "custom_event_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_EVENT_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "custom_event_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_CUSTOM_RETENTION_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_custom_retention_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_CUSTOM_RETENTION_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_custom_retention_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_360DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_360day_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_360DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_360day_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_180DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_180day_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_180DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_180day_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_90DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_90day_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_90DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_90day_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_60DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_60day_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_60DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_60day_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_45DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_45day_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_45DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_45day_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_30DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_30day_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_30DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_30day_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_15DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_15day_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_15DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_15day_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_7DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_7day_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_7DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_7day_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_3DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_3day_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_3DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_3day_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_REPLAY_SESSIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "rum_replay_sessions_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_REPLAY_SESSIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "rum_replay_sessions_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_BROWSER_MOBILE_SESSIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "rum_browser_mobile_sessions_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_BROWSER_MOBILE_SESSIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "rum_browser_mobile_sessions_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INGESTED_SPANS_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "ingested_spans_bytes_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INGESTED_SPANS_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ingested_spans_bytes_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SIEM_INGESTED_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "siem_ingested_bytes_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SIEM_INGESTED_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "siem_ingested_bytes_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_WORKFLOW_EXECUTIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "workflow_executions_usage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_WORKFLOW_EXECUTIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "workflow_executions_percentage"
- MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ALL MonthlyUsageAttributionSupportedMetrics = "*"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_API_USAGE MonthlyUsageAttributionSupportedMetrics = "api_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_API_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "api_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_FARGATE_USAGE MonthlyUsageAttributionSupportedMetrics = "apm_fargate_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_FARGATE_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "apm_fargate_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_FARGATE_USAGE MonthlyUsageAttributionSupportedMetrics = "appsec_fargate_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_FARGATE_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "appsec_fargate_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_HOST_USAGE MonthlyUsageAttributionSupportedMetrics = "apm_host_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_HOST_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "apm_host_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_USM_USAGE MonthlyUsageAttributionSupportedMetrics = "apm_usm_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APM_USM_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "apm_usm_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_USAGE MonthlyUsageAttributionSupportedMetrics = "appsec_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "appsec_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ASM_SERVERLESS_TRACED_INVOCATIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "asm_serverless_traced_invocations_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ASM_SERVERLESS_TRACED_INVOCATIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "asm_serverless_traced_invocations_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_BROWSER_USAGE MonthlyUsageAttributionSupportedMetrics = "browser_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_BROWSER_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "browser_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_VISIBILITY_ITR_USAGE MonthlyUsageAttributionSupportedMetrics = "ci_visibility_itr_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_VISIBILITY_ITR_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ci_visibility_itr_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CLOUD_SIEM_USAGE MonthlyUsageAttributionSupportedMetrics = "cloud_siem_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CLOUD_SIEM_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cloud_siem_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_EXCL_AGENT_USAGE MonthlyUsageAttributionSupportedMetrics = "container_excl_agent_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_EXCL_AGENT_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "container_excl_agent_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_USAGE MonthlyUsageAttributionSupportedMetrics = "container_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "container_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CSPM_CONTAINERS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cspm_containers_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CSPM_CONTAINERS_USAGE MonthlyUsageAttributionSupportedMetrics = "cspm_containers_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CSPM_HOSTS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cspm_hosts_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CSPM_HOSTS_USAGE MonthlyUsageAttributionSupportedMetrics = "cspm_hosts_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_TIMESERIES_USAGE MonthlyUsageAttributionSupportedMetrics = "custom_timeseries_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_TIMESERIES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "custom_timeseries_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_INGESTED_TIMESERIES_USAGE MonthlyUsageAttributionSupportedMetrics = "custom_ingested_timeseries_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_INGESTED_TIMESERIES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "custom_ingested_timeseries_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CWS_CONTAINERS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cws_containers_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CWS_CONTAINERS_USAGE MonthlyUsageAttributionSupportedMetrics = "cws_containers_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CWS_HOSTS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "cws_hosts_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CWS_HOSTS_USAGE MonthlyUsageAttributionSupportedMetrics = "cws_hosts_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_HOSTS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "dbm_hosts_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_HOSTS_USAGE MonthlyUsageAttributionSupportedMetrics = "dbm_hosts_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_QUERIES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "dbm_queries_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_QUERIES_USAGE MonthlyUsageAttributionSupportedMetrics = "dbm_queries_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ERROR_TRACKING_USAGE MonthlyUsageAttributionSupportedMetrics = "error_tracking_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ERROR_TRACKING_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "error_tracking_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_LOGS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_indexed_logs_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_LOGS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_indexed_logs_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_LOGS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_ingested_logs_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_LOGS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_ingested_logs_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_indexed_spans_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_indexed_spans_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_ingested_spans_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_ingested_spans_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FARGATE_USAGE MonthlyUsageAttributionSupportedMetrics = "fargate_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FARGATE_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "fargate_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FUNCTIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "functions_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FUNCTIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "functions_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INCIDENT_MANAGEMENT_MONTHLY_ACTIVE_USERS_USAGE MonthlyUsageAttributionSupportedMetrics = "incident_management_monthly_active_users_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INCIDENT_MANAGEMENT_MONTHLY_ACTIVE_USERS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "incident_management_monthly_active_users_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_USAGE MonthlyUsageAttributionSupportedMetrics = "infra_host_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "infra_host_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INVOCATIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "invocations_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INVOCATIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "invocations_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_TRACED_INVOCATIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "lambda_traced_invocations_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_TRACED_INVOCATIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "lambda_traced_invocations_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_MOBILE_APP_TESTING_USAGE MonthlyUsageAttributionSupportedMetrics = "mobile_app_testing_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_MOBILE_APP_TESTING_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "mobile_app_testing_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_NDM_NETFLOW_USAGE MonthlyUsageAttributionSupportedMetrics = "ndm_netflow_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_NDM_NETFLOW_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ndm_netflow_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_NPM_HOST_USAGE MonthlyUsageAttributionSupportedMetrics = "npm_host_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_NPM_HOST_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "npm_host_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINE_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "obs_pipeline_bytes_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINE_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "obs_pipeline_bytes_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINES_VCPU_USAGE MonthlyUsageAttributionSupportedMetrics = "obs_pipelines_vcpu_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINES_VCPU_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "obs_pipelines_vcpu_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ONLINE_ARCHIVE_USAGE MonthlyUsageAttributionSupportedMetrics = "online_archive_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ONLINE_ARCHIVE_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "online_archive_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_USAGE MonthlyUsageAttributionSupportedMetrics = "profiled_container_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "profiled_container_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_FARGATE_USAGE MonthlyUsageAttributionSupportedMetrics = "profiled_fargate_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_FARGATE_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "profiled_fargate_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_HOST_USAGE MonthlyUsageAttributionSupportedMetrics = "profiled_host_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_HOST_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "profiled_host_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SERVERLESS_APPS_USAGE MonthlyUsageAttributionSupportedMetrics = "serverless_apps_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SERVERLESS_APPS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "serverless_apps_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SNMP_USAGE MonthlyUsageAttributionSupportedMetrics = "snmp_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SNMP_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "snmp_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_RUM_SESSIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "estimated_rum_sessions_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_RUM_SESSIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "estimated_rum_sessions_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_UNIVERSAL_SERVICE_MONITORING_USAGE MonthlyUsageAttributionSupportedMetrics = "universal_service_monitoring_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_UNIVERSAL_SERVICE_MONITORING_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "universal_service_monitoring_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_VULN_MANAGEMENT_HOSTS_USAGE MonthlyUsageAttributionSupportedMetrics = "vuln_management_hosts_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_VULN_MANAGEMENT_HOSTS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "vuln_management_hosts_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SDS_SCANNED_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "sds_scanned_bytes_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SDS_SCANNED_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "sds_scanned_bytes_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_TEST_INDEXED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "ci_test_indexed_spans_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_TEST_INDEXED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ci_test_indexed_spans_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INGESTED_LOGS_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "ingested_logs_bytes_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INGESTED_LOGS_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ingested_logs_bytes_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_PIPELINE_INDEXED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "ci_pipeline_indexed_spans_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CI_PIPELINE_INDEXED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ci_pipeline_indexed_spans_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INDEXED_SPANS_USAGE MonthlyUsageAttributionSupportedMetrics = "indexed_spans_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INDEXED_SPANS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "indexed_spans_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_EVENT_USAGE MonthlyUsageAttributionSupportedMetrics = "custom_event_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_EVENT_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "custom_event_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_CUSTOM_RETENTION_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_custom_retention_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_CUSTOM_RETENTION_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_custom_retention_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_360DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_360day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_360DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_360day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_180DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_180day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_180DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_180day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_90DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_90day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_90DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_90day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_60DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_60day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_60DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_60day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_45DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_45day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_45DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_45day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_30DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_30day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_30DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_30day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_15DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_15day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_15DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_15day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_7DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_7day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_7DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_7day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_3DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_3day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_3DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_3day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_1DAY_USAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_1day_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_1DAY_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "logs_indexed_1day_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_REPLAY_SESSIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "rum_replay_sessions_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_REPLAY_SESSIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "rum_replay_sessions_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_BROWSER_MOBILE_SESSIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "rum_browser_mobile_sessions_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_BROWSER_MOBILE_SESSIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "rum_browser_mobile_sessions_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INGESTED_SPANS_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "ingested_spans_bytes_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INGESTED_SPANS_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "ingested_spans_bytes_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SIEM_INGESTED_BYTES_USAGE MonthlyUsageAttributionSupportedMetrics = "siem_ingested_bytes_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_SIEM_INGESTED_BYTES_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "siem_ingested_bytes_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_WORKFLOW_EXECUTIONS_USAGE MonthlyUsageAttributionSupportedMetrics = "workflow_executions_usage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_WORKFLOW_EXECUTIONS_PERCENTAGE MonthlyUsageAttributionSupportedMetrics = "workflow_executions_percentage"
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ALL MonthlyUsageAttributionSupportedMetrics = "*"
)
var allowedMonthlyUsageAttributionSupportedMetricsEnumValues = []MonthlyUsageAttributionSupportedMetrics{
@@ -201,6 +207,8 @@ var allowedMonthlyUsageAttributionSupportedMetricsEnumValues = []MonthlyUsageAtt
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FARGATE_PERCENTAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FUNCTIONS_USAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_FUNCTIONS_PERCENTAGE,
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INCIDENT_MANAGEMENT_MONTHLY_ACTIVE_USERS_USAGE,
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INCIDENT_MANAGEMENT_MONTHLY_ACTIVE_USERS_PERCENTAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_USAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_PERCENTAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_INVOCATIONS_USAGE,
@@ -217,6 +225,8 @@ var allowedMonthlyUsageAttributionSupportedMetricsEnumValues = []MonthlyUsageAtt
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINE_BYTES_PERCENTAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINES_VCPU_USAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_OBS_PIPELINES_VCPU_PERCENTAGE,
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ONLINE_ARCHIVE_USAGE,
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_ONLINE_ARCHIVE_PERCENTAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_USAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_PERCENTAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_FARGATE_USAGE,
@@ -265,6 +275,8 @@ var allowedMonthlyUsageAttributionSupportedMetricsEnumValues = []MonthlyUsageAtt
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_7DAY_PERCENTAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_3DAY_USAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_3DAY_PERCENTAGE,
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_1DAY_USAGE,
+ MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_1DAY_PERCENTAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_REPLAY_SESSIONS_USAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_REPLAY_SESSIONS_PERCENTAGE,
MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_RUM_BROWSER_MOBILE_SESSIONS_USAGE,
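Illustrative only, not part of the diff: a minimal sketch of consuming the expanded enum. NewMonthlyUsageAttributionSupportedMetricsFromValue is assumed from the generated client's usual enum helpers and is not shown in this change; the import path mirrors the vendor path used in this repository.
package main

import (
	"fmt"
	"log"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
)

func main() {
	// Values added in this bump, such as the 1-day indexed-logs metrics,
	// now parse into the enum instead of being rejected.
	m, err := datadogV1.NewMonthlyUsageAttributionSupportedMetricsFromValue("logs_indexed_1day_usage")
	if err != nil {
		log.Fatal(err) // returned for strings outside the allowed enum values
	}
	fmt.Println(*m == datadogV1.MONTHLYUSAGEATTRIBUTIONSUPPORTEDMETRICS_LOGS_INDEXED_1DAY_USAGE) // true
}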
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monthly_usage_attribution_values.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monthly_usage_attribution_values.go
index 2d92fa1c28..81817c0f07 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monthly_usage_attribution_values.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_monthly_usage_attribution_values.go
@@ -134,6 +134,10 @@ type MonthlyUsageAttributionValues struct {
FunctionsPercentage *float64 `json:"functions_percentage,omitempty"`
// The Lambda function usage by tag(s).
FunctionsUsage *float64 `json:"functions_usage,omitempty"`
+ // The percentage of Incident Management monthly active users usage by tag(s).
+ IncidentManagementMonthlyActiveUsersPercentage *float64 `json:"incident_management_monthly_active_users_percentage,omitempty"`
+ // The Incident Management monthly active users usage by tag(s).
+ IncidentManagementMonthlyActiveUsersUsage *float64 `json:"incident_management_monthly_active_users_usage,omitempty"`
// The percentage of APM Indexed Spans usage by tag(s).
IndexedSpansPercentage *float64 `json:"indexed_spans_percentage,omitempty"`
// The total APM Indexed Spans usage by tag(s).
@@ -166,6 +170,10 @@ type MonthlyUsageAttributionValues struct {
LogsIndexed180dayPercentage *float64 `json:"logs_indexed_180day_percentage,omitempty"`
// The total Indexed Logs (180-day Retention) usage by tag(s).
LogsIndexed180dayUsage *float64 `json:"logs_indexed_180day_usage,omitempty"`
+ // The percentage of Indexed Logs (1-day Retention) usage by tag(s).
+ LogsIndexed1dayPercentage *float64 `json:"logs_indexed_1day_percentage,omitempty"`
+ // The total Indexed Logs (1-day Retention) usage by tag(s).
+ LogsIndexed1dayUsage *float64 `json:"logs_indexed_1day_usage,omitempty"`
// The percentage of Indexed Logs (30-day Retention) usage by tag(s).
LogsIndexed30dayPercentage *float64 `json:"logs_indexed_30day_percentage,omitempty"`
// The total Indexed Logs (30-day Retention) usage by tag(s).
@@ -218,6 +226,10 @@ type MonthlyUsageAttributionValues struct {
ObsPipelinesVcpuPercentage *float64 `json:"obs_pipelines_vcpu_percentage,omitempty"`
// The observability pipeline per core usage by tag(s).
ObsPipelinesVcpuUsage *float64 `json:"obs_pipelines_vcpu_usage,omitempty"`
+ // The percentage of online archive usage by tag(s).
+ OnlineArchivePercentage *float64 `json:"online_archive_percentage,omitempty"`
+ // The online archive usage by tag(s).
+ OnlineArchiveUsage *float64 `json:"online_archive_usage,omitempty"`
// The percentage of profiled container usage by tag(s).
ProfiledContainerPercentage *float64 `json:"profiled_container_percentage,omitempty"`
// The profiled container usage by tag(s).
@@ -2024,6 +2036,62 @@ func (o *MonthlyUsageAttributionValues) SetFunctionsUsage(v float64) {
o.FunctionsUsage = &v
}
+// GetIncidentManagementMonthlyActiveUsersPercentage returns the IncidentManagementMonthlyActiveUsersPercentage field value if set, zero value otherwise.
+func (o *MonthlyUsageAttributionValues) GetIncidentManagementMonthlyActiveUsersPercentage() float64 {
+ if o == nil || o.IncidentManagementMonthlyActiveUsersPercentage == nil {
+ var ret float64
+ return ret
+ }
+ return *o.IncidentManagementMonthlyActiveUsersPercentage
+}
+
+// GetIncidentManagementMonthlyActiveUsersPercentageOk returns a tuple with the IncidentManagementMonthlyActiveUsersPercentage field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *MonthlyUsageAttributionValues) GetIncidentManagementMonthlyActiveUsersPercentageOk() (*float64, bool) {
+ if o == nil || o.IncidentManagementMonthlyActiveUsersPercentage == nil {
+ return nil, false
+ }
+ return o.IncidentManagementMonthlyActiveUsersPercentage, true
+}
+
+// HasIncidentManagementMonthlyActiveUsersPercentage returns a boolean if a field has been set.
+func (o *MonthlyUsageAttributionValues) HasIncidentManagementMonthlyActiveUsersPercentage() bool {
+ return o != nil && o.IncidentManagementMonthlyActiveUsersPercentage != nil
+}
+
+// SetIncidentManagementMonthlyActiveUsersPercentage gets a reference to the given float64 and assigns it to the IncidentManagementMonthlyActiveUsersPercentage field.
+func (o *MonthlyUsageAttributionValues) SetIncidentManagementMonthlyActiveUsersPercentage(v float64) {
+ o.IncidentManagementMonthlyActiveUsersPercentage = &v
+}
+
+// GetIncidentManagementMonthlyActiveUsersUsage returns the IncidentManagementMonthlyActiveUsersUsage field value if set, zero value otherwise.
+func (o *MonthlyUsageAttributionValues) GetIncidentManagementMonthlyActiveUsersUsage() float64 {
+ if o == nil || o.IncidentManagementMonthlyActiveUsersUsage == nil {
+ var ret float64
+ return ret
+ }
+ return *o.IncidentManagementMonthlyActiveUsersUsage
+}
+
+// GetIncidentManagementMonthlyActiveUsersUsageOk returns a tuple with the IncidentManagementMonthlyActiveUsersUsage field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *MonthlyUsageAttributionValues) GetIncidentManagementMonthlyActiveUsersUsageOk() (*float64, bool) {
+ if o == nil || o.IncidentManagementMonthlyActiveUsersUsage == nil {
+ return nil, false
+ }
+ return o.IncidentManagementMonthlyActiveUsersUsage, true
+}
+
+// HasIncidentManagementMonthlyActiveUsersUsage returns a boolean if a field has been set.
+func (o *MonthlyUsageAttributionValues) HasIncidentManagementMonthlyActiveUsersUsage() bool {
+ return o != nil && o.IncidentManagementMonthlyActiveUsersUsage != nil
+}
+
+// SetIncidentManagementMonthlyActiveUsersUsage gets a reference to the given float64 and assigns it to the IncidentManagementMonthlyActiveUsersUsage field.
+func (o *MonthlyUsageAttributionValues) SetIncidentManagementMonthlyActiveUsersUsage(v float64) {
+ o.IncidentManagementMonthlyActiveUsersUsage = &v
+}
+
// GetIndexedSpansPercentage returns the IndexedSpansPercentage field value if set, zero value otherwise.
func (o *MonthlyUsageAttributionValues) GetIndexedSpansPercentage() float64 {
if o == nil || o.IndexedSpansPercentage == nil {
@@ -2472,6 +2540,62 @@ func (o *MonthlyUsageAttributionValues) SetLogsIndexed180dayUsage(v float64) {
o.LogsIndexed180dayUsage = &v
}
+// GetLogsIndexed1dayPercentage returns the LogsIndexed1dayPercentage field value if set, zero value otherwise.
+func (o *MonthlyUsageAttributionValues) GetLogsIndexed1dayPercentage() float64 {
+ if o == nil || o.LogsIndexed1dayPercentage == nil {
+ var ret float64
+ return ret
+ }
+ return *o.LogsIndexed1dayPercentage
+}
+
+// GetLogsIndexed1dayPercentageOk returns a tuple with the LogsIndexed1dayPercentage field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *MonthlyUsageAttributionValues) GetLogsIndexed1dayPercentageOk() (*float64, bool) {
+ if o == nil || o.LogsIndexed1dayPercentage == nil {
+ return nil, false
+ }
+ return o.LogsIndexed1dayPercentage, true
+}
+
+// HasLogsIndexed1dayPercentage returns a boolean if a field has been set.
+func (o *MonthlyUsageAttributionValues) HasLogsIndexed1dayPercentage() bool {
+ return o != nil && o.LogsIndexed1dayPercentage != nil
+}
+
+// SetLogsIndexed1dayPercentage gets a reference to the given float64 and assigns it to the LogsIndexed1dayPercentage field.
+func (o *MonthlyUsageAttributionValues) SetLogsIndexed1dayPercentage(v float64) {
+ o.LogsIndexed1dayPercentage = &v
+}
+
+// GetLogsIndexed1dayUsage returns the LogsIndexed1dayUsage field value if set, zero value otherwise.
+func (o *MonthlyUsageAttributionValues) GetLogsIndexed1dayUsage() float64 {
+ if o == nil || o.LogsIndexed1dayUsage == nil {
+ var ret float64
+ return ret
+ }
+ return *o.LogsIndexed1dayUsage
+}
+
+// GetLogsIndexed1dayUsageOk returns a tuple with the LogsIndexed1dayUsage field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *MonthlyUsageAttributionValues) GetLogsIndexed1dayUsageOk() (*float64, bool) {
+ if o == nil || o.LogsIndexed1dayUsage == nil {
+ return nil, false
+ }
+ return o.LogsIndexed1dayUsage, true
+}
+
+// HasLogsIndexed1dayUsage returns a boolean if a field has been set.
+func (o *MonthlyUsageAttributionValues) HasLogsIndexed1dayUsage() bool {
+ return o != nil && o.LogsIndexed1dayUsage != nil
+}
+
+// SetLogsIndexed1dayUsage gets a reference to the given float64 and assigns it to the LogsIndexed1dayUsage field.
+func (o *MonthlyUsageAttributionValues) SetLogsIndexed1dayUsage(v float64) {
+ o.LogsIndexed1dayUsage = &v
+}
+
// GetLogsIndexed30dayPercentage returns the LogsIndexed30dayPercentage field value if set, zero value otherwise.
func (o *MonthlyUsageAttributionValues) GetLogsIndexed30dayPercentage() float64 {
if o == nil || o.LogsIndexed30dayPercentage == nil {
@@ -3200,6 +3324,62 @@ func (o *MonthlyUsageAttributionValues) SetObsPipelinesVcpuUsage(v float64) {
o.ObsPipelinesVcpuUsage = &v
}
+// GetOnlineArchivePercentage returns the OnlineArchivePercentage field value if set, zero value otherwise.
+func (o *MonthlyUsageAttributionValues) GetOnlineArchivePercentage() float64 {
+ if o == nil || o.OnlineArchivePercentage == nil {
+ var ret float64
+ return ret
+ }
+ return *o.OnlineArchivePercentage
+}
+
+// GetOnlineArchivePercentageOk returns a tuple with the OnlineArchivePercentage field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *MonthlyUsageAttributionValues) GetOnlineArchivePercentageOk() (*float64, bool) {
+ if o == nil || o.OnlineArchivePercentage == nil {
+ return nil, false
+ }
+ return o.OnlineArchivePercentage, true
+}
+
+// HasOnlineArchivePercentage returns a boolean if a field has been set.
+func (o *MonthlyUsageAttributionValues) HasOnlineArchivePercentage() bool {
+ return o != nil && o.OnlineArchivePercentage != nil
+}
+
+// SetOnlineArchivePercentage gets a reference to the given float64 and assigns it to the OnlineArchivePercentage field.
+func (o *MonthlyUsageAttributionValues) SetOnlineArchivePercentage(v float64) {
+ o.OnlineArchivePercentage = &v
+}
+
+// GetOnlineArchiveUsage returns the OnlineArchiveUsage field value if set, zero value otherwise.
+func (o *MonthlyUsageAttributionValues) GetOnlineArchiveUsage() float64 {
+ if o == nil || o.OnlineArchiveUsage == nil {
+ var ret float64
+ return ret
+ }
+ return *o.OnlineArchiveUsage
+}
+
+// GetOnlineArchiveUsageOk returns a tuple with the OnlineArchiveUsage field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *MonthlyUsageAttributionValues) GetOnlineArchiveUsageOk() (*float64, bool) {
+ if o == nil || o.OnlineArchiveUsage == nil {
+ return nil, false
+ }
+ return o.OnlineArchiveUsage, true
+}
+
+// HasOnlineArchiveUsage returns a boolean if a field has been set.
+func (o *MonthlyUsageAttributionValues) HasOnlineArchiveUsage() bool {
+ return o != nil && o.OnlineArchiveUsage != nil
+}
+
+// SetOnlineArchiveUsage gets a reference to the given float64 and assigns it to the OnlineArchiveUsage field.
+func (o *MonthlyUsageAttributionValues) SetOnlineArchiveUsage(v float64) {
+ o.OnlineArchiveUsage = &v
+}
+
// GetProfiledContainerPercentage returns the ProfiledContainerPercentage field value if set, zero value otherwise.
func (o *MonthlyUsageAttributionValues) GetProfiledContainerPercentage() float64 {
if o == nil || o.ProfiledContainerPercentage == nil {
@@ -4064,6 +4244,12 @@ func (o MonthlyUsageAttributionValues) MarshalJSON() ([]byte, error) {
if o.FunctionsUsage != nil {
toSerialize["functions_usage"] = o.FunctionsUsage
}
+ if o.IncidentManagementMonthlyActiveUsersPercentage != nil {
+ toSerialize["incident_management_monthly_active_users_percentage"] = o.IncidentManagementMonthlyActiveUsersPercentage
+ }
+ if o.IncidentManagementMonthlyActiveUsersUsage != nil {
+ toSerialize["incident_management_monthly_active_users_usage"] = o.IncidentManagementMonthlyActiveUsersUsage
+ }
if o.IndexedSpansPercentage != nil {
toSerialize["indexed_spans_percentage"] = o.IndexedSpansPercentage
}
@@ -4112,6 +4298,12 @@ func (o MonthlyUsageAttributionValues) MarshalJSON() ([]byte, error) {
if o.LogsIndexed180dayUsage != nil {
toSerialize["logs_indexed_180day_usage"] = o.LogsIndexed180dayUsage
}
+ if o.LogsIndexed1dayPercentage != nil {
+ toSerialize["logs_indexed_1day_percentage"] = o.LogsIndexed1dayPercentage
+ }
+ if o.LogsIndexed1dayUsage != nil {
+ toSerialize["logs_indexed_1day_usage"] = o.LogsIndexed1dayUsage
+ }
if o.LogsIndexed30dayPercentage != nil {
toSerialize["logs_indexed_30day_percentage"] = o.LogsIndexed30dayPercentage
}
@@ -4190,6 +4382,12 @@ func (o MonthlyUsageAttributionValues) MarshalJSON() ([]byte, error) {
if o.ObsPipelinesVcpuUsage != nil {
toSerialize["obs_pipelines_vcpu_usage"] = o.ObsPipelinesVcpuUsage
}
+ if o.OnlineArchivePercentage != nil {
+ toSerialize["online_archive_percentage"] = o.OnlineArchivePercentage
+ }
+ if o.OnlineArchiveUsage != nil {
+ toSerialize["online_archive_usage"] = o.OnlineArchiveUsage
+ }
if o.ProfiledContainerPercentage != nil {
toSerialize["profiled_container_percentage"] = o.ProfiledContainerPercentage
}
@@ -4272,141 +4470,147 @@ func (o MonthlyUsageAttributionValues) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the given payload.
func (o *MonthlyUsageAttributionValues) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- ApiPercentage *float64 `json:"api_percentage,omitempty"`
- ApiUsage *float64 `json:"api_usage,omitempty"`
- ApmFargatePercentage *float64 `json:"apm_fargate_percentage,omitempty"`
- ApmFargateUsage *float64 `json:"apm_fargate_usage,omitempty"`
- ApmHostPercentage *float64 `json:"apm_host_percentage,omitempty"`
- ApmHostUsage *float64 `json:"apm_host_usage,omitempty"`
- ApmUsmPercentage *float64 `json:"apm_usm_percentage,omitempty"`
- ApmUsmUsage *float64 `json:"apm_usm_usage,omitempty"`
- AppsecFargatePercentage *float64 `json:"appsec_fargate_percentage,omitempty"`
- AppsecFargateUsage *float64 `json:"appsec_fargate_usage,omitempty"`
- AppsecPercentage *float64 `json:"appsec_percentage,omitempty"`
- AppsecUsage *float64 `json:"appsec_usage,omitempty"`
- AsmServerlessTracedInvocationsPercentage *float64 `json:"asm_serverless_traced_invocations_percentage,omitempty"`
- AsmServerlessTracedInvocationsUsage *float64 `json:"asm_serverless_traced_invocations_usage,omitempty"`
- BrowserPercentage *float64 `json:"browser_percentage,omitempty"`
- BrowserUsage *float64 `json:"browser_usage,omitempty"`
- CiPipelineIndexedSpansPercentage *float64 `json:"ci_pipeline_indexed_spans_percentage,omitempty"`
- CiPipelineIndexedSpansUsage *float64 `json:"ci_pipeline_indexed_spans_usage,omitempty"`
- CiTestIndexedSpansPercentage *float64 `json:"ci_test_indexed_spans_percentage,omitempty"`
- CiTestIndexedSpansUsage *float64 `json:"ci_test_indexed_spans_usage,omitempty"`
- CiVisibilityItrPercentage *float64 `json:"ci_visibility_itr_percentage,omitempty"`
- CiVisibilityItrUsage *float64 `json:"ci_visibility_itr_usage,omitempty"`
- CloudSiemPercentage *float64 `json:"cloud_siem_percentage,omitempty"`
- CloudSiemUsage *float64 `json:"cloud_siem_usage,omitempty"`
- ContainerExclAgentPercentage *float64 `json:"container_excl_agent_percentage,omitempty"`
- ContainerExclAgentUsage *float64 `json:"container_excl_agent_usage,omitempty"`
- ContainerPercentage *float64 `json:"container_percentage,omitempty"`
- ContainerUsage *float64 `json:"container_usage,omitempty"`
- CspmContainersPercentage *float64 `json:"cspm_containers_percentage,omitempty"`
- CspmContainersUsage *float64 `json:"cspm_containers_usage,omitempty"`
- CspmHostsPercentage *float64 `json:"cspm_hosts_percentage,omitempty"`
- CspmHostsUsage *float64 `json:"cspm_hosts_usage,omitempty"`
- CustomEventPercentage *float64 `json:"custom_event_percentage,omitempty"`
- CustomEventUsage *float64 `json:"custom_event_usage,omitempty"`
- CustomIngestedTimeseriesPercentage *float64 `json:"custom_ingested_timeseries_percentage,omitempty"`
- CustomIngestedTimeseriesUsage *float64 `json:"custom_ingested_timeseries_usage,omitempty"`
- CustomTimeseriesPercentage *float64 `json:"custom_timeseries_percentage,omitempty"`
- CustomTimeseriesUsage *float64 `json:"custom_timeseries_usage,omitempty"`
- CwsContainersPercentage *float64 `json:"cws_containers_percentage,omitempty"`
- CwsContainersUsage *float64 `json:"cws_containers_usage,omitempty"`
- CwsHostsPercentage *float64 `json:"cws_hosts_percentage,omitempty"`
- CwsHostsUsage *float64 `json:"cws_hosts_usage,omitempty"`
- DbmHostsPercentage *float64 `json:"dbm_hosts_percentage,omitempty"`
- DbmHostsUsage *float64 `json:"dbm_hosts_usage,omitempty"`
- DbmQueriesPercentage *float64 `json:"dbm_queries_percentage,omitempty"`
- DbmQueriesUsage *float64 `json:"dbm_queries_usage,omitempty"`
- ErrorTrackingPercentage *float64 `json:"error_tracking_percentage,omitempty"`
- ErrorTrackingUsage *float64 `json:"error_tracking_usage,omitempty"`
- EstimatedIndexedLogsPercentage *float64 `json:"estimated_indexed_logs_percentage,omitempty"`
- EstimatedIndexedLogsUsage *float64 `json:"estimated_indexed_logs_usage,omitempty"`
- EstimatedIndexedSpansPercentage *float64 `json:"estimated_indexed_spans_percentage,omitempty"`
- EstimatedIndexedSpansUsage *float64 `json:"estimated_indexed_spans_usage,omitempty"`
- EstimatedIngestedLogsPercentage *float64 `json:"estimated_ingested_logs_percentage,omitempty"`
- EstimatedIngestedLogsUsage *float64 `json:"estimated_ingested_logs_usage,omitempty"`
- EstimatedIngestedSpansPercentage *float64 `json:"estimated_ingested_spans_percentage,omitempty"`
- EstimatedIngestedSpansUsage *float64 `json:"estimated_ingested_spans_usage,omitempty"`
- EstimatedRumSessionsPercentage *float64 `json:"estimated_rum_sessions_percentage,omitempty"`
- EstimatedRumSessionsUsage *float64 `json:"estimated_rum_sessions_usage,omitempty"`
- FargatePercentage *float64 `json:"fargate_percentage,omitempty"`
- FargateUsage *float64 `json:"fargate_usage,omitempty"`
- FunctionsPercentage *float64 `json:"functions_percentage,omitempty"`
- FunctionsUsage *float64 `json:"functions_usage,omitempty"`
- IndexedSpansPercentage *float64 `json:"indexed_spans_percentage,omitempty"`
- IndexedSpansUsage *float64 `json:"indexed_spans_usage,omitempty"`
- InfraHostPercentage *float64 `json:"infra_host_percentage,omitempty"`
- InfraHostUsage *float64 `json:"infra_host_usage,omitempty"`
- IngestedLogsBytesPercentage *float64 `json:"ingested_logs_bytes_percentage,omitempty"`
- IngestedLogsBytesUsage *float64 `json:"ingested_logs_bytes_usage,omitempty"`
- IngestedSpansBytesPercentage *float64 `json:"ingested_spans_bytes_percentage,omitempty"`
- IngestedSpansBytesUsage *float64 `json:"ingested_spans_bytes_usage,omitempty"`
- InvocationsPercentage *float64 `json:"invocations_percentage,omitempty"`
- InvocationsUsage *float64 `json:"invocations_usage,omitempty"`
- LambdaTracedInvocationsPercentage *float64 `json:"lambda_traced_invocations_percentage,omitempty"`
- LambdaTracedInvocationsUsage *float64 `json:"lambda_traced_invocations_usage,omitempty"`
- LogsIndexed15dayPercentage *float64 `json:"logs_indexed_15day_percentage,omitempty"`
- LogsIndexed15dayUsage *float64 `json:"logs_indexed_15day_usage,omitempty"`
- LogsIndexed180dayPercentage *float64 `json:"logs_indexed_180day_percentage,omitempty"`
- LogsIndexed180dayUsage *float64 `json:"logs_indexed_180day_usage,omitempty"`
- LogsIndexed30dayPercentage *float64 `json:"logs_indexed_30day_percentage,omitempty"`
- LogsIndexed30dayUsage *float64 `json:"logs_indexed_30day_usage,omitempty"`
- LogsIndexed360dayPercentage *float64 `json:"logs_indexed_360day_percentage,omitempty"`
- LogsIndexed360dayUsage *float64 `json:"logs_indexed_360day_usage,omitempty"`
- LogsIndexed3dayPercentage *float64 `json:"logs_indexed_3day_percentage,omitempty"`
- LogsIndexed3dayUsage *float64 `json:"logs_indexed_3day_usage,omitempty"`
- LogsIndexed45dayPercentage *float64 `json:"logs_indexed_45day_percentage,omitempty"`
- LogsIndexed45dayUsage *float64 `json:"logs_indexed_45day_usage,omitempty"`
- LogsIndexed60dayPercentage *float64 `json:"logs_indexed_60day_percentage,omitempty"`
- LogsIndexed60dayUsage *float64 `json:"logs_indexed_60day_usage,omitempty"`
- LogsIndexed7dayPercentage *float64 `json:"logs_indexed_7day_percentage,omitempty"`
- LogsIndexed7dayUsage *float64 `json:"logs_indexed_7day_usage,omitempty"`
- LogsIndexed90dayPercentage *float64 `json:"logs_indexed_90day_percentage,omitempty"`
- LogsIndexed90dayUsage *float64 `json:"logs_indexed_90day_usage,omitempty"`
- LogsIndexedCustomRetentionPercentage *float64 `json:"logs_indexed_custom_retention_percentage,omitempty"`
- LogsIndexedCustomRetentionUsage *float64 `json:"logs_indexed_custom_retention_usage,omitempty"`
- MobileAppTestingPercentage *float64 `json:"mobile_app_testing_percentage,omitempty"`
- MobileAppTestingUsage *float64 `json:"mobile_app_testing_usage,omitempty"`
- NdmNetflowPercentage *float64 `json:"ndm_netflow_percentage,omitempty"`
- NdmNetflowUsage *float64 `json:"ndm_netflow_usage,omitempty"`
- NpmHostPercentage *float64 `json:"npm_host_percentage,omitempty"`
- NpmHostUsage *float64 `json:"npm_host_usage,omitempty"`
- ObsPipelineBytesPercentage *float64 `json:"obs_pipeline_bytes_percentage,omitempty"`
- ObsPipelineBytesUsage *float64 `json:"obs_pipeline_bytes_usage,omitempty"`
- ObsPipelinesVcpuPercentage *float64 `json:"obs_pipelines_vcpu_percentage,omitempty"`
- ObsPipelinesVcpuUsage *float64 `json:"obs_pipelines_vcpu_usage,omitempty"`
- ProfiledContainerPercentage *float64 `json:"profiled_container_percentage,omitempty"`
- ProfiledContainerUsage *float64 `json:"profiled_container_usage,omitempty"`
- ProfiledFargatePercentage *float64 `json:"profiled_fargate_percentage,omitempty"`
- ProfiledFargateUsage *float64 `json:"profiled_fargate_usage,omitempty"`
- ProfiledHostPercentage *float64 `json:"profiled_host_percentage,omitempty"`
- ProfiledHostUsage *float64 `json:"profiled_host_usage,omitempty"`
- RumBrowserMobileSessionsPercentage *float64 `json:"rum_browser_mobile_sessions_percentage,omitempty"`
- RumBrowserMobileSessionsUsage *float64 `json:"rum_browser_mobile_sessions_usage,omitempty"`
- RumReplaySessionsPercentage *float64 `json:"rum_replay_sessions_percentage,omitempty"`
- RumReplaySessionsUsage *float64 `json:"rum_replay_sessions_usage,omitempty"`
- SdsScannedBytesPercentage *float64 `json:"sds_scanned_bytes_percentage,omitempty"`
- SdsScannedBytesUsage *float64 `json:"sds_scanned_bytes_usage,omitempty"`
- ServerlessAppsPercentage *float64 `json:"serverless_apps_percentage,omitempty"`
- ServerlessAppsUsage *float64 `json:"serverless_apps_usage,omitempty"`
- SiemIngestedBytesPercentage *float64 `json:"siem_ingested_bytes_percentage,omitempty"`
- SiemIngestedBytesUsage *float64 `json:"siem_ingested_bytes_usage,omitempty"`
- SnmpPercentage *float64 `json:"snmp_percentage,omitempty"`
- SnmpUsage *float64 `json:"snmp_usage,omitempty"`
- UniversalServiceMonitoringPercentage *float64 `json:"universal_service_monitoring_percentage,omitempty"`
- UniversalServiceMonitoringUsage *float64 `json:"universal_service_monitoring_usage,omitempty"`
- VulnManagementHostsPercentage *float64 `json:"vuln_management_hosts_percentage,omitempty"`
- VulnManagementHostsUsage *float64 `json:"vuln_management_hosts_usage,omitempty"`
- WorkflowExecutionsPercentage *float64 `json:"workflow_executions_percentage,omitempty"`
- WorkflowExecutionsUsage *float64 `json:"workflow_executions_usage,omitempty"`
+ ApiPercentage *float64 `json:"api_percentage,omitempty"`
+ ApiUsage *float64 `json:"api_usage,omitempty"`
+ ApmFargatePercentage *float64 `json:"apm_fargate_percentage,omitempty"`
+ ApmFargateUsage *float64 `json:"apm_fargate_usage,omitempty"`
+ ApmHostPercentage *float64 `json:"apm_host_percentage,omitempty"`
+ ApmHostUsage *float64 `json:"apm_host_usage,omitempty"`
+ ApmUsmPercentage *float64 `json:"apm_usm_percentage,omitempty"`
+ ApmUsmUsage *float64 `json:"apm_usm_usage,omitempty"`
+ AppsecFargatePercentage *float64 `json:"appsec_fargate_percentage,omitempty"`
+ AppsecFargateUsage *float64 `json:"appsec_fargate_usage,omitempty"`
+ AppsecPercentage *float64 `json:"appsec_percentage,omitempty"`
+ AppsecUsage *float64 `json:"appsec_usage,omitempty"`
+ AsmServerlessTracedInvocationsPercentage *float64 `json:"asm_serverless_traced_invocations_percentage,omitempty"`
+ AsmServerlessTracedInvocationsUsage *float64 `json:"asm_serverless_traced_invocations_usage,omitempty"`
+ BrowserPercentage *float64 `json:"browser_percentage,omitempty"`
+ BrowserUsage *float64 `json:"browser_usage,omitempty"`
+ CiPipelineIndexedSpansPercentage *float64 `json:"ci_pipeline_indexed_spans_percentage,omitempty"`
+ CiPipelineIndexedSpansUsage *float64 `json:"ci_pipeline_indexed_spans_usage,omitempty"`
+ CiTestIndexedSpansPercentage *float64 `json:"ci_test_indexed_spans_percentage,omitempty"`
+ CiTestIndexedSpansUsage *float64 `json:"ci_test_indexed_spans_usage,omitempty"`
+ CiVisibilityItrPercentage *float64 `json:"ci_visibility_itr_percentage,omitempty"`
+ CiVisibilityItrUsage *float64 `json:"ci_visibility_itr_usage,omitempty"`
+ CloudSiemPercentage *float64 `json:"cloud_siem_percentage,omitempty"`
+ CloudSiemUsage *float64 `json:"cloud_siem_usage,omitempty"`
+ ContainerExclAgentPercentage *float64 `json:"container_excl_agent_percentage,omitempty"`
+ ContainerExclAgentUsage *float64 `json:"container_excl_agent_usage,omitempty"`
+ ContainerPercentage *float64 `json:"container_percentage,omitempty"`
+ ContainerUsage *float64 `json:"container_usage,omitempty"`
+ CspmContainersPercentage *float64 `json:"cspm_containers_percentage,omitempty"`
+ CspmContainersUsage *float64 `json:"cspm_containers_usage,omitempty"`
+ CspmHostsPercentage *float64 `json:"cspm_hosts_percentage,omitempty"`
+ CspmHostsUsage *float64 `json:"cspm_hosts_usage,omitempty"`
+ CustomEventPercentage *float64 `json:"custom_event_percentage,omitempty"`
+ CustomEventUsage *float64 `json:"custom_event_usage,omitempty"`
+ CustomIngestedTimeseriesPercentage *float64 `json:"custom_ingested_timeseries_percentage,omitempty"`
+ CustomIngestedTimeseriesUsage *float64 `json:"custom_ingested_timeseries_usage,omitempty"`
+ CustomTimeseriesPercentage *float64 `json:"custom_timeseries_percentage,omitempty"`
+ CustomTimeseriesUsage *float64 `json:"custom_timeseries_usage,omitempty"`
+ CwsContainersPercentage *float64 `json:"cws_containers_percentage,omitempty"`
+ CwsContainersUsage *float64 `json:"cws_containers_usage,omitempty"`
+ CwsHostsPercentage *float64 `json:"cws_hosts_percentage,omitempty"`
+ CwsHostsUsage *float64 `json:"cws_hosts_usage,omitempty"`
+ DbmHostsPercentage *float64 `json:"dbm_hosts_percentage,omitempty"`
+ DbmHostsUsage *float64 `json:"dbm_hosts_usage,omitempty"`
+ DbmQueriesPercentage *float64 `json:"dbm_queries_percentage,omitempty"`
+ DbmQueriesUsage *float64 `json:"dbm_queries_usage,omitempty"`
+ ErrorTrackingPercentage *float64 `json:"error_tracking_percentage,omitempty"`
+ ErrorTrackingUsage *float64 `json:"error_tracking_usage,omitempty"`
+ EstimatedIndexedLogsPercentage *float64 `json:"estimated_indexed_logs_percentage,omitempty"`
+ EstimatedIndexedLogsUsage *float64 `json:"estimated_indexed_logs_usage,omitempty"`
+ EstimatedIndexedSpansPercentage *float64 `json:"estimated_indexed_spans_percentage,omitempty"`
+ EstimatedIndexedSpansUsage *float64 `json:"estimated_indexed_spans_usage,omitempty"`
+ EstimatedIngestedLogsPercentage *float64 `json:"estimated_ingested_logs_percentage,omitempty"`
+ EstimatedIngestedLogsUsage *float64 `json:"estimated_ingested_logs_usage,omitempty"`
+ EstimatedIngestedSpansPercentage *float64 `json:"estimated_ingested_spans_percentage,omitempty"`
+ EstimatedIngestedSpansUsage *float64 `json:"estimated_ingested_spans_usage,omitempty"`
+ EstimatedRumSessionsPercentage *float64 `json:"estimated_rum_sessions_percentage,omitempty"`
+ EstimatedRumSessionsUsage *float64 `json:"estimated_rum_sessions_usage,omitempty"`
+ FargatePercentage *float64 `json:"fargate_percentage,omitempty"`
+ FargateUsage *float64 `json:"fargate_usage,omitempty"`
+ FunctionsPercentage *float64 `json:"functions_percentage,omitempty"`
+ FunctionsUsage *float64 `json:"functions_usage,omitempty"`
+ IncidentManagementMonthlyActiveUsersPercentage *float64 `json:"incident_management_monthly_active_users_percentage,omitempty"`
+ IncidentManagementMonthlyActiveUsersUsage *float64 `json:"incident_management_monthly_active_users_usage,omitempty"`
+ IndexedSpansPercentage *float64 `json:"indexed_spans_percentage,omitempty"`
+ IndexedSpansUsage *float64 `json:"indexed_spans_usage,omitempty"`
+ InfraHostPercentage *float64 `json:"infra_host_percentage,omitempty"`
+ InfraHostUsage *float64 `json:"infra_host_usage,omitempty"`
+ IngestedLogsBytesPercentage *float64 `json:"ingested_logs_bytes_percentage,omitempty"`
+ IngestedLogsBytesUsage *float64 `json:"ingested_logs_bytes_usage,omitempty"`
+ IngestedSpansBytesPercentage *float64 `json:"ingested_spans_bytes_percentage,omitempty"`
+ IngestedSpansBytesUsage *float64 `json:"ingested_spans_bytes_usage,omitempty"`
+ InvocationsPercentage *float64 `json:"invocations_percentage,omitempty"`
+ InvocationsUsage *float64 `json:"invocations_usage,omitempty"`
+ LambdaTracedInvocationsPercentage *float64 `json:"lambda_traced_invocations_percentage,omitempty"`
+ LambdaTracedInvocationsUsage *float64 `json:"lambda_traced_invocations_usage,omitempty"`
+ LogsIndexed15dayPercentage *float64 `json:"logs_indexed_15day_percentage,omitempty"`
+ LogsIndexed15dayUsage *float64 `json:"logs_indexed_15day_usage,omitempty"`
+ LogsIndexed180dayPercentage *float64 `json:"logs_indexed_180day_percentage,omitempty"`
+ LogsIndexed180dayUsage *float64 `json:"logs_indexed_180day_usage,omitempty"`
+ LogsIndexed1dayPercentage *float64 `json:"logs_indexed_1day_percentage,omitempty"`
+ LogsIndexed1dayUsage *float64 `json:"logs_indexed_1day_usage,omitempty"`
+ LogsIndexed30dayPercentage *float64 `json:"logs_indexed_30day_percentage,omitempty"`
+ LogsIndexed30dayUsage *float64 `json:"logs_indexed_30day_usage,omitempty"`
+ LogsIndexed360dayPercentage *float64 `json:"logs_indexed_360day_percentage,omitempty"`
+ LogsIndexed360dayUsage *float64 `json:"logs_indexed_360day_usage,omitempty"`
+ LogsIndexed3dayPercentage *float64 `json:"logs_indexed_3day_percentage,omitempty"`
+ LogsIndexed3dayUsage *float64 `json:"logs_indexed_3day_usage,omitempty"`
+ LogsIndexed45dayPercentage *float64 `json:"logs_indexed_45day_percentage,omitempty"`
+ LogsIndexed45dayUsage *float64 `json:"logs_indexed_45day_usage,omitempty"`
+ LogsIndexed60dayPercentage *float64 `json:"logs_indexed_60day_percentage,omitempty"`
+ LogsIndexed60dayUsage *float64 `json:"logs_indexed_60day_usage,omitempty"`
+ LogsIndexed7dayPercentage *float64 `json:"logs_indexed_7day_percentage,omitempty"`
+ LogsIndexed7dayUsage *float64 `json:"logs_indexed_7day_usage,omitempty"`
+ LogsIndexed90dayPercentage *float64 `json:"logs_indexed_90day_percentage,omitempty"`
+ LogsIndexed90dayUsage *float64 `json:"logs_indexed_90day_usage,omitempty"`
+ LogsIndexedCustomRetentionPercentage *float64 `json:"logs_indexed_custom_retention_percentage,omitempty"`
+ LogsIndexedCustomRetentionUsage *float64 `json:"logs_indexed_custom_retention_usage,omitempty"`
+ MobileAppTestingPercentage *float64 `json:"mobile_app_testing_percentage,omitempty"`
+ MobileAppTestingUsage *float64 `json:"mobile_app_testing_usage,omitempty"`
+ NdmNetflowPercentage *float64 `json:"ndm_netflow_percentage,omitempty"`
+ NdmNetflowUsage *float64 `json:"ndm_netflow_usage,omitempty"`
+ NpmHostPercentage *float64 `json:"npm_host_percentage,omitempty"`
+ NpmHostUsage *float64 `json:"npm_host_usage,omitempty"`
+ ObsPipelineBytesPercentage *float64 `json:"obs_pipeline_bytes_percentage,omitempty"`
+ ObsPipelineBytesUsage *float64 `json:"obs_pipeline_bytes_usage,omitempty"`
+ ObsPipelinesVcpuPercentage *float64 `json:"obs_pipelines_vcpu_percentage,omitempty"`
+ ObsPipelinesVcpuUsage *float64 `json:"obs_pipelines_vcpu_usage,omitempty"`
+ OnlineArchivePercentage *float64 `json:"online_archive_percentage,omitempty"`
+ OnlineArchiveUsage *float64 `json:"online_archive_usage,omitempty"`
+ ProfiledContainerPercentage *float64 `json:"profiled_container_percentage,omitempty"`
+ ProfiledContainerUsage *float64 `json:"profiled_container_usage,omitempty"`
+ ProfiledFargatePercentage *float64 `json:"profiled_fargate_percentage,omitempty"`
+ ProfiledFargateUsage *float64 `json:"profiled_fargate_usage,omitempty"`
+ ProfiledHostPercentage *float64 `json:"profiled_host_percentage,omitempty"`
+ ProfiledHostUsage *float64 `json:"profiled_host_usage,omitempty"`
+ RumBrowserMobileSessionsPercentage *float64 `json:"rum_browser_mobile_sessions_percentage,omitempty"`
+ RumBrowserMobileSessionsUsage *float64 `json:"rum_browser_mobile_sessions_usage,omitempty"`
+ RumReplaySessionsPercentage *float64 `json:"rum_replay_sessions_percentage,omitempty"`
+ RumReplaySessionsUsage *float64 `json:"rum_replay_sessions_usage,omitempty"`
+ SdsScannedBytesPercentage *float64 `json:"sds_scanned_bytes_percentage,omitempty"`
+ SdsScannedBytesUsage *float64 `json:"sds_scanned_bytes_usage,omitempty"`
+ ServerlessAppsPercentage *float64 `json:"serverless_apps_percentage,omitempty"`
+ ServerlessAppsUsage *float64 `json:"serverless_apps_usage,omitempty"`
+ SiemIngestedBytesPercentage *float64 `json:"siem_ingested_bytes_percentage,omitempty"`
+ SiemIngestedBytesUsage *float64 `json:"siem_ingested_bytes_usage,omitempty"`
+ SnmpPercentage *float64 `json:"snmp_percentage,omitempty"`
+ SnmpUsage *float64 `json:"snmp_usage,omitempty"`
+ UniversalServiceMonitoringPercentage *float64 `json:"universal_service_monitoring_percentage,omitempty"`
+ UniversalServiceMonitoringUsage *float64 `json:"universal_service_monitoring_usage,omitempty"`
+ VulnManagementHostsPercentage *float64 `json:"vuln_management_hosts_percentage,omitempty"`
+ VulnManagementHostsUsage *float64 `json:"vuln_management_hosts_usage,omitempty"`
+ WorkflowExecutionsPercentage *float64 `json:"workflow_executions_percentage,omitempty"`
+ WorkflowExecutionsUsage *float64 `json:"workflow_executions_usage,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"api_percentage", "api_usage", "apm_fargate_percentage", "apm_fargate_usage", "apm_host_percentage", "apm_host_usage", "apm_usm_percentage", "apm_usm_usage", "appsec_fargate_percentage", "appsec_fargate_usage", "appsec_percentage", "appsec_usage", "asm_serverless_traced_invocations_percentage", "asm_serverless_traced_invocations_usage", "browser_percentage", "browser_usage", "ci_pipeline_indexed_spans_percentage", "ci_pipeline_indexed_spans_usage", "ci_test_indexed_spans_percentage", "ci_test_indexed_spans_usage", "ci_visibility_itr_percentage", "ci_visibility_itr_usage", "cloud_siem_percentage", "cloud_siem_usage", "container_excl_agent_percentage", "container_excl_agent_usage", "container_percentage", "container_usage", "cspm_containers_percentage", "cspm_containers_usage", "cspm_hosts_percentage", "cspm_hosts_usage", "custom_event_percentage", "custom_event_usage", "custom_ingested_timeseries_percentage", "custom_ingested_timeseries_usage", "custom_timeseries_percentage", "custom_timeseries_usage", "cws_containers_percentage", "cws_containers_usage", "cws_hosts_percentage", "cws_hosts_usage", "dbm_hosts_percentage", "dbm_hosts_usage", "dbm_queries_percentage", "dbm_queries_usage", "error_tracking_percentage", "error_tracking_usage", "estimated_indexed_logs_percentage", "estimated_indexed_logs_usage", "estimated_indexed_spans_percentage", "estimated_indexed_spans_usage", "estimated_ingested_logs_percentage", "estimated_ingested_logs_usage", "estimated_ingested_spans_percentage", "estimated_ingested_spans_usage", "estimated_rum_sessions_percentage", "estimated_rum_sessions_usage", "fargate_percentage", "fargate_usage", "functions_percentage", "functions_usage", "indexed_spans_percentage", "indexed_spans_usage", "infra_host_percentage", "infra_host_usage", "ingested_logs_bytes_percentage", "ingested_logs_bytes_usage", "ingested_spans_bytes_percentage", "ingested_spans_bytes_usage", "invocations_percentage", "invocations_usage", "lambda_traced_invocations_percentage", "lambda_traced_invocations_usage", "logs_indexed_15day_percentage", "logs_indexed_15day_usage", "logs_indexed_180day_percentage", "logs_indexed_180day_usage", "logs_indexed_30day_percentage", "logs_indexed_30day_usage", "logs_indexed_360day_percentage", "logs_indexed_360day_usage", "logs_indexed_3day_percentage", "logs_indexed_3day_usage", "logs_indexed_45day_percentage", "logs_indexed_45day_usage", "logs_indexed_60day_percentage", "logs_indexed_60day_usage", "logs_indexed_7day_percentage", "logs_indexed_7day_usage", "logs_indexed_90day_percentage", "logs_indexed_90day_usage", "logs_indexed_custom_retention_percentage", "logs_indexed_custom_retention_usage", "mobile_app_testing_percentage", "mobile_app_testing_usage", "ndm_netflow_percentage", "ndm_netflow_usage", "npm_host_percentage", "npm_host_usage", "obs_pipeline_bytes_percentage", "obs_pipeline_bytes_usage", "obs_pipelines_vcpu_percentage", "obs_pipelines_vcpu_usage", "profiled_container_percentage", "profiled_container_usage", "profiled_fargate_percentage", "profiled_fargate_usage", "profiled_host_percentage", "profiled_host_usage", "rum_browser_mobile_sessions_percentage", "rum_browser_mobile_sessions_usage", "rum_replay_sessions_percentage", "rum_replay_sessions_usage", "sds_scanned_bytes_percentage", "sds_scanned_bytes_usage", "serverless_apps_percentage", "serverless_apps_usage", "siem_ingested_bytes_percentage", "siem_ingested_bytes_usage", "snmp_percentage", "snmp_usage", "universal_service_monitoring_percentage", "universal_service_monitoring_usage", "vuln_management_hosts_percentage", "vuln_management_hosts_usage", "workflow_executions_percentage", "workflow_executions_usage"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"api_percentage", "api_usage", "apm_fargate_percentage", "apm_fargate_usage", "apm_host_percentage", "apm_host_usage", "apm_usm_percentage", "apm_usm_usage", "appsec_fargate_percentage", "appsec_fargate_usage", "appsec_percentage", "appsec_usage", "asm_serverless_traced_invocations_percentage", "asm_serverless_traced_invocations_usage", "browser_percentage", "browser_usage", "ci_pipeline_indexed_spans_percentage", "ci_pipeline_indexed_spans_usage", "ci_test_indexed_spans_percentage", "ci_test_indexed_spans_usage", "ci_visibility_itr_percentage", "ci_visibility_itr_usage", "cloud_siem_percentage", "cloud_siem_usage", "container_excl_agent_percentage", "container_excl_agent_usage", "container_percentage", "container_usage", "cspm_containers_percentage", "cspm_containers_usage", "cspm_hosts_percentage", "cspm_hosts_usage", "custom_event_percentage", "custom_event_usage", "custom_ingested_timeseries_percentage", "custom_ingested_timeseries_usage", "custom_timeseries_percentage", "custom_timeseries_usage", "cws_containers_percentage", "cws_containers_usage", "cws_hosts_percentage", "cws_hosts_usage", "dbm_hosts_percentage", "dbm_hosts_usage", "dbm_queries_percentage", "dbm_queries_usage", "error_tracking_percentage", "error_tracking_usage", "estimated_indexed_logs_percentage", "estimated_indexed_logs_usage", "estimated_indexed_spans_percentage", "estimated_indexed_spans_usage", "estimated_ingested_logs_percentage", "estimated_ingested_logs_usage", "estimated_ingested_spans_percentage", "estimated_ingested_spans_usage", "estimated_rum_sessions_percentage", "estimated_rum_sessions_usage", "fargate_percentage", "fargate_usage", "functions_percentage", "functions_usage", "incident_management_monthly_active_users_percentage", "incident_management_monthly_active_users_usage", "indexed_spans_percentage", "indexed_spans_usage", "infra_host_percentage", "infra_host_usage", "ingested_logs_bytes_percentage", "ingested_logs_bytes_usage", "ingested_spans_bytes_percentage", "ingested_spans_bytes_usage", "invocations_percentage", "invocations_usage", "lambda_traced_invocations_percentage", "lambda_traced_invocations_usage", "logs_indexed_15day_percentage", "logs_indexed_15day_usage", "logs_indexed_180day_percentage", "logs_indexed_180day_usage", "logs_indexed_1day_percentage", "logs_indexed_1day_usage", "logs_indexed_30day_percentage", "logs_indexed_30day_usage", "logs_indexed_360day_percentage", "logs_indexed_360day_usage", "logs_indexed_3day_percentage", "logs_indexed_3day_usage", "logs_indexed_45day_percentage", "logs_indexed_45day_usage", "logs_indexed_60day_percentage", "logs_indexed_60day_usage", "logs_indexed_7day_percentage", "logs_indexed_7day_usage", "logs_indexed_90day_percentage", "logs_indexed_90day_usage", "logs_indexed_custom_retention_percentage", "logs_indexed_custom_retention_usage", "mobile_app_testing_percentage", "mobile_app_testing_usage", "ndm_netflow_percentage", "ndm_netflow_usage", "npm_host_percentage", "npm_host_usage", "obs_pipeline_bytes_percentage", "obs_pipeline_bytes_usage", "obs_pipelines_vcpu_percentage", "obs_pipelines_vcpu_usage", "online_archive_percentage", "online_archive_usage", "profiled_container_percentage", "profiled_container_usage", "profiled_fargate_percentage", "profiled_fargate_usage", "profiled_host_percentage", "profiled_host_usage", "rum_browser_mobile_sessions_percentage", "rum_browser_mobile_sessions_usage", "rum_replay_sessions_percentage", "rum_replay_sessions_usage", "sds_scanned_bytes_percentage", "sds_scanned_bytes_usage", "serverless_apps_percentage", "serverless_apps_usage", "siem_ingested_bytes_percentage", "siem_ingested_bytes_usage", "snmp_percentage", "snmp_usage", "universal_service_monitoring_percentage", "universal_service_monitoring_usage", "vuln_management_hosts_percentage", "vuln_management_hosts_usage", "workflow_executions_percentage", "workflow_executions_usage"})
} else {
return err
}
@@ -4472,6 +4676,8 @@ func (o *MonthlyUsageAttributionValues) UnmarshalJSON(bytes []byte) (err error)
o.FargateUsage = all.FargateUsage
o.FunctionsPercentage = all.FunctionsPercentage
o.FunctionsUsage = all.FunctionsUsage
+ o.IncidentManagementMonthlyActiveUsersPercentage = all.IncidentManagementMonthlyActiveUsersPercentage
+ o.IncidentManagementMonthlyActiveUsersUsage = all.IncidentManagementMonthlyActiveUsersUsage
o.IndexedSpansPercentage = all.IndexedSpansPercentage
o.IndexedSpansUsage = all.IndexedSpansUsage
o.InfraHostPercentage = all.InfraHostPercentage
@@ -4488,6 +4694,8 @@ func (o *MonthlyUsageAttributionValues) UnmarshalJSON(bytes []byte) (err error)
o.LogsIndexed15dayUsage = all.LogsIndexed15dayUsage
o.LogsIndexed180dayPercentage = all.LogsIndexed180dayPercentage
o.LogsIndexed180dayUsage = all.LogsIndexed180dayUsage
+ o.LogsIndexed1dayPercentage = all.LogsIndexed1dayPercentage
+ o.LogsIndexed1dayUsage = all.LogsIndexed1dayUsage
o.LogsIndexed30dayPercentage = all.LogsIndexed30dayPercentage
o.LogsIndexed30dayUsage = all.LogsIndexed30dayUsage
o.LogsIndexed360dayPercentage = all.LogsIndexed360dayPercentage
@@ -4514,6 +4722,8 @@ func (o *MonthlyUsageAttributionValues) UnmarshalJSON(bytes []byte) (err error)
o.ObsPipelineBytesUsage = all.ObsPipelineBytesUsage
o.ObsPipelinesVcpuPercentage = all.ObsPipelinesVcpuPercentage
o.ObsPipelinesVcpuUsage = all.ObsPipelinesVcpuUsage
+ o.OnlineArchivePercentage = all.OnlineArchivePercentage
+ o.OnlineArchiveUsage = all.OnlineArchiveUsage
o.ProfiledContainerPercentage = all.ProfiledContainerPercentage
o.ProfiledContainerUsage = all.ProfiledContainerUsage
o.ProfiledFargatePercentage = all.ProfiledFargatePercentage
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_series.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_series.go
index 8be82885ae..18cf4bd2b3 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_series.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_series.go
@@ -15,7 +15,7 @@ import (
type Series struct {
// The name of the host that produced the metric.
Host *string `json:"host,omitempty"`
- // If the type of the metric is rate or count, define the corresponding interval.
+ // If the type of the metric is rate or count, define the corresponding interval in seconds.
Interval datadog.NullableInt64 `json:"interval,omitempty"`
// The name of the timeseries.
Metric string `json:"metric"`
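
Not part of the patch: a minimal sketch of what the clarified interval comment means in practice. It assumes the generated Series helpers of this vendored client (in particular a SetInterval setter for the nullable field); the metric name is illustrative and the required points are elided.

package main

import (
	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
)

func main() {
	// host, type, and points elided for brevity
	s := datadogV1.Series{Metric: "app.requests.per_second"}
	// assumed generated setter for the nullable interval field:
	// the submitted rate/count was computed over a 60-second window
	s.SetInterval(60)
}
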
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_service_level_objective.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_service_level_objective.go
index af60fd447e..eb38ccc0f4 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_service_level_objective.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_service_level_objective.go
@@ -66,7 +66,8 @@ type ServiceLevelObjective struct {
// The thresholds (timeframes and associated targets) for this service level
// objective object.
Thresholds []SLOThreshold `json:"thresholds"`
- // The SLO time window options.
+ // The SLO time window options. Note that "custom" is not a valid option for creating
+ // or updating SLOs. It is only used when querying SLO history over custom timeframes.
Timeframe *SLOTimeframe `json:"timeframe,omitempty"`
// The type of the service level objective.
Type SLOType `json:"type"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_service_level_objective_request.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_service_level_objective_request.go
index f5fbf5f30e..50c1b8357b 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_service_level_objective_request.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_service_level_objective_request.go
@@ -45,7 +45,8 @@ type ServiceLevelObjectiveRequest struct {
// The thresholds (timeframes and associated targets) for this service level
// objective object.
Thresholds []SLOThreshold `json:"thresholds"`
- // The SLO time window options.
+ // The SLO time window options. Note that "custom" is not a valid option for creating
+ // or updating SLOs. It is only used when querying SLO history over custom timeframes.
Timeframe *SLOTimeframe `json:"timeframe,omitempty"`
// The type of the service level objective.
Type SLOType `json:"type"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_history_monitor.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_history_monitor.go
index f055f0c426..a5a6189ea6 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_history_monitor.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_history_monitor.go
@@ -17,7 +17,13 @@ type SLOHistoryMonitor struct {
Errors []SLOHistoryResponseErrorWithType `json:"errors,omitempty"`
// For groups in a grouped SLO, this is the group name.
Group *string `json:"group,omitempty"`
- // For `monitor` based SLOs, this includes the aggregated history as arrays that include timeseries and uptime data where `0=monitor` is in `OK` state and `1=monitor` is in `alert` state.
+ // The state transition history for the monitor. It is represented as
+ // an array of pairs. Each pair is an array containing the timestamp of the transition
+ // as an integer in Unix epoch format in the first element, and the state as an integer in the
+ // second element. An integer value of `0` for state means uptime, `1` means downtime, and `2` means no data.
+ // Periods of no data are counted either as uptime or downtime depending on monitor settings.
+ // See [SLO documentation](https://docs.datadoghq.com/service_management/service_level_objectives/monitor/#missing-data)
+ // for detailed information.
History [][]float64 `json:"history,omitempty"`
// For `monitor` based SLOs, this is the last modified timestamp in epoch seconds of the monitor.
MonitorModified *int64 `json:"monitor_modified,omitempty"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_history_sli_data.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_history_sli_data.go
index 587877fb4c..bd87010bd2 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_history_sli_data.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_history_sli_data.go
@@ -17,7 +17,14 @@ type SLOHistorySLIData struct {
Errors []SLOHistoryResponseErrorWithType `json:"errors,omitempty"`
// For groups in a grouped SLO, this is the group name.
Group *string `json:"group,omitempty"`
- // For `monitor` based SLOs, this includes the aggregated history as arrays that include timeseries and uptime data where `0=monitor` is in `OK` state and `1=monitor` is in `alert` state.
+ // The state transition history for `monitor` or `time-slice` SLOs. It is represented as
+ // an array of pairs. Each pair is an array containing the timestamp of the transition
+ // as an integer in Unix epoch format in the first element, and the state as an integer in the
+ // second element. An integer value of `0` for state means uptime, `1` means downtime, and `2` means no data.
+ // Periods of no data count as uptime in time-slice SLOs, while for monitor SLOs, no data is counted
+ // either as uptime or downtime depending on monitor settings. See
+ // [SLO documentation](https://docs.datadoghq.com/service_management/service_level_objectives/monitor/#missing-data)
+ // for detailed information.
History [][]float64 `json:"history,omitempty"`
// For `monitor` based SLOs, this is the last modified timestamp in epoch seconds of the monitor.
MonitorModified *int64 `json:"monitor_modified,omitempty"`
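
For orientation (not from the patch): a self-contained sketch of decoding the History pairs exactly as the new comments describe them; the timestamps below are made up.

package main

import "fmt"

// summarizeHistory walks the [timestamp, state] pairs: 0 = uptime,
// 1 = downtime, 2 = no data, per the doc comments above.
func summarizeHistory(history [][]float64) {
	states := map[float64]string{0: "uptime", 1: "downtime", 2: "no data"}
	for _, pair := range history {
		if len(pair) < 2 {
			continue // skip malformed entries
		}
		fmt.Printf("at %d the SLI entered %q\n", int64(pair[0]), states[pair[1]])
	}
}

func main() {
	summarizeHistory([][]float64{{1711929600, 0}, {1711933200, 1}, {1711936800, 2}})
}
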
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_overall_statuses.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_overall_statuses.go
index 98c5234d0b..0116f7fb04 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_overall_statuses.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_overall_statuses.go
@@ -27,7 +27,8 @@ type SLOOverallStatuses struct {
Status datadog.NullableFloat64 `json:"status,omitempty"`
// The target of the SLO.
Target *float64 `json:"target,omitempty"`
- // The SLO time window options.
+ // The SLO time window options. Note that "custom" is not a valid option for creating
+ // or updating SLOs. It is only used when querying SLO history over custom timeframes.
Timeframe *SLOTimeframe `json:"timeframe,omitempty"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject map[string]interface{} `json:"-"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_response_data.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_response_data.go
index 7c340d4c1e..0640d778fe 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_response_data.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_response_data.go
@@ -66,7 +66,8 @@ type SLOResponseData struct {
// The thresholds (timeframes and associated targets) for this service level
// objective object.
Thresholds []SLOThreshold `json:"thresholds,omitempty"`
- // The SLO time window options.
+ // The SLO time window options. Note that "custom" is not a valid option for creating
+ // or updating SLOs. It is only used when querying SLO history over custom timeframes.
Timeframe *SLOTimeframe `json:"timeframe,omitempty"`
// The type of the service level objective.
Type *SLOType `json:"type,omitempty"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_threshold.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_threshold.go
index 6bbd3c7f92..abf5dfbc45 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_threshold.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_threshold.go
@@ -21,7 +21,8 @@ type SLOThreshold struct {
// Always included in service level objective responses. Ignored in
// create/update requests.
TargetDisplay *string `json:"target_display,omitempty"`
- // The SLO time window options.
+ // The SLO time window options. Note that "custom" is not a valid option for creating
+ // or updating SLOs. It is only used when querying SLO history over custom timeframes.
Timeframe SLOTimeframe `json:"timeframe"`
// The warning value for the service level objective.
Warning *float64 `json:"warning,omitempty"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_condition.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_condition.go
index ab7017b5c9..d2ddd647b3 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_condition.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_condition.go
@@ -11,12 +11,16 @@ import (
)
// SLOTimeSliceCondition The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,
-// and 3. the threshold.
+// and 3. the threshold. Optionally, a fourth part, the query interval, can be provided.
type SLOTimeSliceCondition struct {
// The comparator used to compare the SLI value to the threshold.
Comparator SLOTimeSliceComparator `json:"comparator"`
// The queries and formula used to calculate the SLI value.
Query SLOTimeSliceQuery `json:"query"`
+ // The interval used when querying data, which defines the size of a time slice.
+ // Two values are allowed: 60 (1 minute) and 300 (5 minutes).
+ // If not provided, the value defaults to 300 (5 minutes).
+ QueryIntervalSeconds *SLOTimeSliceInterval `json:"query_interval_seconds,omitempty"`
// The threshold value to which each SLI value will be compared.
Threshold float64 `json:"threshold"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
@@ -90,6 +94,34 @@ func (o *SLOTimeSliceCondition) SetQuery(v SLOTimeSliceQuery) {
o.Query = v
}
+// GetQueryIntervalSeconds returns the QueryIntervalSeconds field value if set, zero value otherwise.
+func (o *SLOTimeSliceCondition) GetQueryIntervalSeconds() SLOTimeSliceInterval {
+ if o == nil || o.QueryIntervalSeconds == nil {
+ var ret SLOTimeSliceInterval
+ return ret
+ }
+ return *o.QueryIntervalSeconds
+}
+
+// GetQueryIntervalSecondsOk returns a tuple with the QueryIntervalSeconds field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SLOTimeSliceCondition) GetQueryIntervalSecondsOk() (*SLOTimeSliceInterval, bool) {
+ if o == nil || o.QueryIntervalSeconds == nil {
+ return nil, false
+ }
+ return o.QueryIntervalSeconds, true
+}
+
+// HasQueryIntervalSeconds returns a boolean if a field has been set.
+func (o *SLOTimeSliceCondition) HasQueryIntervalSeconds() bool {
+ return o != nil && o.QueryIntervalSeconds != nil
+}
+
+// SetQueryIntervalSeconds gets a reference to the given SLOTimeSliceInterval and assigns it to the QueryIntervalSeconds field.
+func (o *SLOTimeSliceCondition) SetQueryIntervalSeconds(v SLOTimeSliceInterval) {
+ o.QueryIntervalSeconds = &v
+}
+
// GetThreshold returns the Threshold field value.
func (o *SLOTimeSliceCondition) GetThreshold() float64 {
if o == nil {
@@ -121,6 +153,9 @@ func (o SLOTimeSliceCondition) MarshalJSON() ([]byte, error) {
}
toSerialize["comparator"] = o.Comparator
toSerialize["query"] = o.Query
+ if o.QueryIntervalSeconds != nil {
+ toSerialize["query_interval_seconds"] = o.QueryIntervalSeconds
+ }
toSerialize["threshold"] = o.Threshold
for key, value := range o.AdditionalProperties {
@@ -132,9 +167,10 @@ func (o SLOTimeSliceCondition) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the given payload.
func (o *SLOTimeSliceCondition) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- Comparator *SLOTimeSliceComparator `json:"comparator"`
- Query *SLOTimeSliceQuery `json:"query"`
- Threshold *float64 `json:"threshold"`
+ Comparator *SLOTimeSliceComparator `json:"comparator"`
+ Query *SLOTimeSliceQuery `json:"query"`
+ QueryIntervalSeconds *SLOTimeSliceInterval `json:"query_interval_seconds,omitempty"`
+ Threshold *float64 `json:"threshold"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
@@ -150,7 +186,7 @@ func (o *SLOTimeSliceCondition) UnmarshalJSON(bytes []byte) (err error) {
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"comparator", "query", "threshold"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"comparator", "query", "query_interval_seconds", "threshold"})
} else {
return err
}
@@ -165,6 +201,11 @@ func (o *SLOTimeSliceCondition) UnmarshalJSON(bytes []byte) (err error) {
hasInvalidField = true
}
o.Query = *all.Query
+ if all.QueryIntervalSeconds != nil && !all.QueryIntervalSeconds.IsValid() {
+ hasInvalidField = true
+ } else {
+ o.QueryIntervalSeconds = all.QueryIntervalSeconds
+ }
o.Threshold = *all.Threshold
if len(additionalProperties) > 0 {
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_interval.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_interval.go
new file mode 100644
index 0000000000..4d4c3f17ae
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_interval.go
@@ -0,0 +1,68 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV1
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SLOTimeSliceInterval The interval used when querying data, which defines the size of a time slice.
+// Two values are allowed: 60 (1 minute) and 300 (5 minutes).
+// If not provided, the value defaults to 300 (5 minutes).
+type SLOTimeSliceInterval int32
+
+// List of SLOTimeSliceInterval.
+const (
+ SLOTIMESLICEINTERVAL_ONE_MINUTE SLOTimeSliceInterval = 60
+ SLOTIMESLICEINTERVAL_FIVE_MINUTES SLOTimeSliceInterval = 300
+)
+
+var allowedSLOTimeSliceIntervalEnumValues = []SLOTimeSliceInterval{
+ SLOTIMESLICEINTERVAL_ONE_MINUTE,
+ SLOTIMESLICEINTERVAL_FIVE_MINUTES,
+}
+
+// GetAllowedValues returns the list of possible values.
+func (v *SLOTimeSliceInterval) GetAllowedValues() []SLOTimeSliceInterval {
+ return allowedSLOTimeSliceIntervalEnumValues
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (v *SLOTimeSliceInterval) UnmarshalJSON(src []byte) error {
+ var value int32
+ err := datadog.Unmarshal(src, &value)
+ if err != nil {
+ return err
+ }
+ *v = SLOTimeSliceInterval(value)
+ return nil
+}
+
+// NewSLOTimeSliceIntervalFromValue returns a pointer to a valid SLOTimeSliceInterval
+// for the value passed as argument, or an error if the value passed is not allowed by the enum.
+func NewSLOTimeSliceIntervalFromValue(v int32) (*SLOTimeSliceInterval, error) {
+ ev := SLOTimeSliceInterval(v)
+ if ev.IsValid() {
+ return &ev, nil
+ }
+ return nil, fmt.Errorf("invalid value '%v' for SLOTimeSliceInterval: valid values are %v", v, allowedSLOTimeSliceIntervalEnumValues)
+}
+
+// IsValid returns true if the value is valid for the enum, false otherwise.
+func (v SLOTimeSliceInterval) IsValid() bool {
+ for _, existing := range allowedSLOTimeSliceIntervalEnumValues {
+ if existing == v {
+ return true
+ }
+ }
+ return false
+}
+
+// Ptr returns reference to SLOTimeSliceInterval value.
+func (v SLOTimeSliceInterval) Ptr() *SLOTimeSliceInterval {
+ return &v
+}
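
A usage sketch (not part of the patch) for the new SLOTimeSliceInterval enum and the query_interval_seconds accessor added above; the condition's required query, comparator, and threshold are elided, so this only illustrates the interval handling.

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
)

func main() {
	// comparator, query, and threshold elided for brevity
	var cond datadogV1.SLOTimeSliceCondition
	cond.SetQueryIntervalSeconds(datadogV1.SLOTIMESLICEINTERVAL_ONE_MINUTE) // 60-second slices

	// values other than 60 and 300 are rejected by the enum constructor
	if _, err := datadogV1.NewSLOTimeSliceIntervalFromValue(120); err != nil {
		fmt.Println(err)
	}
}
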
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_spec.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_spec.go
index 341d8b1e4d..8a68f69e38 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_spec.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_time_slice_spec.go
@@ -13,7 +13,7 @@ import (
// SLOTimeSliceSpec A time-slice SLI specification.
type SLOTimeSliceSpec struct {
// The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,
- // and 3. the threshold.
+ // and 3. the threshold. Optionally, a fourth part, the query interval, can be provided.
TimeSlice SLOTimeSliceCondition `json:"time_slice"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject map[string]interface{} `json:"-"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_timeframe.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_timeframe.go
index 8b6564d382..1f47c50a85 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_timeframe.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_slo_timeframe.go
@@ -10,7 +10,8 @@ import (
"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
)
-// SLOTimeframe The SLO time window options.
+// SLOTimeframe The SLO time window options. Note that "custom" is not a valid option for creating
+// or updating SLOs. It is only used when querying SLO history over custom timeframes.
type SLOTimeframe string
// List of SLOTimeframe.
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_api_test_config.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_api_test_config.go
index d4faf6b5e9..e6403da0cc 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_api_test_config.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_api_test_config.go
@@ -18,6 +18,8 @@ type SyntheticsAPITestConfig struct {
Request *SyntheticsTestRequest `json:"request,omitempty"`
// When the test subtype is `multi`, the steps of the test.
Steps []SyntheticsAPIStep `json:"steps,omitempty"`
+ // Variables defined from JavaScript code.
+ VariablesFromScript *string `json:"variablesFromScript,omitempty"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject map[string]interface{} `json:"-"`
AdditionalProperties map[string]interface{}
@@ -152,6 +154,34 @@ func (o *SyntheticsAPITestConfig) SetSteps(v []SyntheticsAPIStep) {
o.Steps = v
}
+// GetVariablesFromScript returns the VariablesFromScript field value if set, zero value otherwise.
+func (o *SyntheticsAPITestConfig) GetVariablesFromScript() string {
+ if o == nil || o.VariablesFromScript == nil {
+ var ret string
+ return ret
+ }
+ return *o.VariablesFromScript
+}
+
+// GetVariablesFromScriptOk returns a tuple with the VariablesFromScript field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsAPITestConfig) GetVariablesFromScriptOk() (*string, bool) {
+ if o == nil || o.VariablesFromScript == nil {
+ return nil, false
+ }
+ return o.VariablesFromScript, true
+}
+
+// HasVariablesFromScript returns a boolean if a field has been set.
+func (o *SyntheticsAPITestConfig) HasVariablesFromScript() bool {
+ return o != nil && o.VariablesFromScript != nil
+}
+
+// SetVariablesFromScript gets a reference to the given string and assigns it to the VariablesFromScript field.
+func (o *SyntheticsAPITestConfig) SetVariablesFromScript(v string) {
+ o.VariablesFromScript = &v
+}
+
// MarshalJSON serializes the struct using spec logic.
func (o SyntheticsAPITestConfig) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
@@ -170,6 +200,9 @@ func (o SyntheticsAPITestConfig) MarshalJSON() ([]byte, error) {
if o.Steps != nil {
toSerialize["steps"] = o.Steps
}
+ if o.VariablesFromScript != nil {
+ toSerialize["variablesFromScript"] = o.VariablesFromScript
+ }
for key, value := range o.AdditionalProperties {
toSerialize[key] = value
@@ -180,17 +213,18 @@ func (o SyntheticsAPITestConfig) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the given payload.
func (o *SyntheticsAPITestConfig) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- Assertions []SyntheticsAssertion `json:"assertions,omitempty"`
- ConfigVariables []SyntheticsConfigVariable `json:"configVariables,omitempty"`
- Request *SyntheticsTestRequest `json:"request,omitempty"`
- Steps []SyntheticsAPIStep `json:"steps,omitempty"`
+ Assertions []SyntheticsAssertion `json:"assertions,omitempty"`
+ ConfigVariables []SyntheticsConfigVariable `json:"configVariables,omitempty"`
+ Request *SyntheticsTestRequest `json:"request,omitempty"`
+ Steps []SyntheticsAPIStep `json:"steps,omitempty"`
+ VariablesFromScript *string `json:"variablesFromScript,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"assertions", "configVariables", "request", "steps"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"assertions", "configVariables", "request", "steps", "variablesFromScript"})
} else {
return err
}
@@ -203,6 +237,7 @@ func (o *SyntheticsAPITestConfig) UnmarshalJSON(bytes []byte) (err error) {
}
o.Request = all.Request
o.Steps = all.Steps
+ o.VariablesFromScript = all.VariablesFromScript
if len(additionalProperties) > 0 {
o.AdditionalProperties = additionalProperties
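
A sketch (not from the patch) of the new variablesFromScript accessors on SyntheticsAPITestConfig; the script body is illustrative only and assumes Datadog's dd.variable.set helper is available in the test's JavaScript context.

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
)

func main() {
	cfg := datadogV1.SyntheticsAPITestConfig{}
	// hypothetical script body defining a variable for later steps
	cfg.SetVariablesFromScript(`dd.variable.set("TEST_RUN_ID", "run-42");`)
	if cfg.HasVariablesFromScript() {
		fmt.Println(cfg.GetVariablesFromScript())
	}
}
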
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion.go
index b12fab9d66..018f663641 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion.go
@@ -11,9 +11,10 @@ import (
// SyntheticsAssertion - Object describing the assertions type, their associated operator,
// which property they apply, and upon which target.
type SyntheticsAssertion struct {
- SyntheticsAssertionTarget *SyntheticsAssertionTarget
- SyntheticsAssertionJSONPathTarget *SyntheticsAssertionJSONPathTarget
- SyntheticsAssertionXPathTarget *SyntheticsAssertionXPathTarget
+ SyntheticsAssertionTarget *SyntheticsAssertionTarget
+ SyntheticsAssertionJSONPathTarget *SyntheticsAssertionJSONPathTarget
+ SyntheticsAssertionJSONSchemaTarget *SyntheticsAssertionJSONSchemaTarget
+ SyntheticsAssertionXPathTarget *SyntheticsAssertionXPathTarget
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject interface{}
@@ -29,6 +30,11 @@ func SyntheticsAssertionJSONPathTargetAsSyntheticsAssertion(v *SyntheticsAsserti
return SyntheticsAssertion{SyntheticsAssertionJSONPathTarget: v}
}
+// SyntheticsAssertionJSONSchemaTargetAsSyntheticsAssertion is a convenience function that returns SyntheticsAssertionJSONSchemaTarget wrapped in SyntheticsAssertion.
+func SyntheticsAssertionJSONSchemaTargetAsSyntheticsAssertion(v *SyntheticsAssertionJSONSchemaTarget) SyntheticsAssertion {
+ return SyntheticsAssertion{SyntheticsAssertionJSONSchemaTarget: v}
+}
+
// SyntheticsAssertionXPathTargetAsSyntheticsAssertion is a convenience function that returns SyntheticsAssertionXPathTarget wrapped in SyntheticsAssertion.
func SyntheticsAssertionXPathTargetAsSyntheticsAssertion(v *SyntheticsAssertionXPathTarget) SyntheticsAssertion {
return SyntheticsAssertion{SyntheticsAssertionXPathTarget: v}
@@ -72,6 +78,23 @@ func (obj *SyntheticsAssertion) UnmarshalJSON(data []byte) error {
obj.SyntheticsAssertionJSONPathTarget = nil
}
+ // try to unmarshal data into SyntheticsAssertionJSONSchemaTarget
+ err = datadog.Unmarshal(data, &obj.SyntheticsAssertionJSONSchemaTarget)
+ if err == nil {
+ if obj.SyntheticsAssertionJSONSchemaTarget != nil && obj.SyntheticsAssertionJSONSchemaTarget.UnparsedObject == nil {
+ jsonSyntheticsAssertionJSONSchemaTarget, _ := datadog.Marshal(obj.SyntheticsAssertionJSONSchemaTarget)
+ if string(jsonSyntheticsAssertionJSONSchemaTarget) == "{}" { // empty struct
+ obj.SyntheticsAssertionJSONSchemaTarget = nil
+ } else {
+ match++
+ }
+ } else {
+ obj.SyntheticsAssertionJSONSchemaTarget = nil
+ }
+ } else {
+ obj.SyntheticsAssertionJSONSchemaTarget = nil
+ }
+
// try to unmarshal data into SyntheticsAssertionXPathTarget
err = datadog.Unmarshal(data, &obj.SyntheticsAssertionXPathTarget)
if err == nil {
@@ -93,6 +116,7 @@ func (obj *SyntheticsAssertion) UnmarshalJSON(data []byte) error {
// reset to nil
obj.SyntheticsAssertionTarget = nil
obj.SyntheticsAssertionJSONPathTarget = nil
+ obj.SyntheticsAssertionJSONSchemaTarget = nil
obj.SyntheticsAssertionXPathTarget = nil
return datadog.Unmarshal(data, &obj.UnparsedObject)
}
@@ -109,6 +133,10 @@ func (obj SyntheticsAssertion) MarshalJSON() ([]byte, error) {
return datadog.Marshal(&obj.SyntheticsAssertionJSONPathTarget)
}
+ if obj.SyntheticsAssertionJSONSchemaTarget != nil {
+ return datadog.Marshal(&obj.SyntheticsAssertionJSONSchemaTarget)
+ }
+
if obj.SyntheticsAssertionXPathTarget != nil {
return datadog.Marshal(&obj.SyntheticsAssertionXPathTarget)
}
@@ -129,6 +157,10 @@ func (obj *SyntheticsAssertion) GetActualInstance() interface{} {
return obj.SyntheticsAssertionJSONPathTarget
}
+ if obj.SyntheticsAssertionJSONSchemaTarget != nil {
+ return obj.SyntheticsAssertionJSONSchemaTarget
+ }
+
if obj.SyntheticsAssertionXPathTarget != nil {
return obj.SyntheticsAssertionXPathTarget
}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_meta_schema.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_meta_schema.go
new file mode 100644
index 0000000000..5f76685746
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_meta_schema.go
@@ -0,0 +1,66 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV1
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SyntheticsAssertionJSONSchemaMetaSchema The JSON Schema meta-schema version used in the assertion.
+type SyntheticsAssertionJSONSchemaMetaSchema string
+
+// List of SyntheticsAssertionJSONSchemaMetaSchema.
+const (
+ SYNTHETICSASSERTIONJSONSCHEMAMETASCHEMA_DRAFT_07 SyntheticsAssertionJSONSchemaMetaSchema = "draft-07"
+ SYNTHETICSASSERTIONJSONSCHEMAMETASCHEMA_DRAFT_06 SyntheticsAssertionJSONSchemaMetaSchema = "draft-06"
+)
+
+var allowedSyntheticsAssertionJSONSchemaMetaSchemaEnumValues = []SyntheticsAssertionJSONSchemaMetaSchema{
+ SYNTHETICSASSERTIONJSONSCHEMAMETASCHEMA_DRAFT_07,
+ SYNTHETICSASSERTIONJSONSCHEMAMETASCHEMA_DRAFT_06,
+}
+
+// GetAllowedValues returns the list of possible values.
+func (v *SyntheticsAssertionJSONSchemaMetaSchema) GetAllowedValues() []SyntheticsAssertionJSONSchemaMetaSchema {
+ return allowedSyntheticsAssertionJSONSchemaMetaSchemaEnumValues
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (v *SyntheticsAssertionJSONSchemaMetaSchema) UnmarshalJSON(src []byte) error {
+ var value string
+ err := datadog.Unmarshal(src, &value)
+ if err != nil {
+ return err
+ }
+ *v = SyntheticsAssertionJSONSchemaMetaSchema(value)
+ return nil
+}
+
+// NewSyntheticsAssertionJSONSchemaMetaSchemaFromValue returns a pointer to a valid SyntheticsAssertionJSONSchemaMetaSchema
+// for the value passed as argument, or an error if the value passed is not allowed by the enum.
+func NewSyntheticsAssertionJSONSchemaMetaSchemaFromValue(v string) (*SyntheticsAssertionJSONSchemaMetaSchema, error) {
+ ev := SyntheticsAssertionJSONSchemaMetaSchema(v)
+ if ev.IsValid() {
+ return &ev, nil
+ }
+ return nil, fmt.Errorf("invalid value '%v' for SyntheticsAssertionJSONSchemaMetaSchema: valid values are %v", v, allowedSyntheticsAssertionJSONSchemaMetaSchemaEnumValues)
+}
+
+// IsValid returns true if the value is valid for the enum, false otherwise.
+func (v SyntheticsAssertionJSONSchemaMetaSchema) IsValid() bool {
+ for _, existing := range allowedSyntheticsAssertionJSONSchemaMetaSchemaEnumValues {
+ if existing == v {
+ return true
+ }
+ }
+ return false
+}
+
+// Ptr returns reference to SyntheticsAssertionJSONSchemaMetaSchema value.
+func (v SyntheticsAssertionJSONSchemaMetaSchema) Ptr() *SyntheticsAssertionJSONSchemaMetaSchema {
+ return &v
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_operator.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_operator.go
new file mode 100644
index 0000000000..a50eee09f6
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_operator.go
@@ -0,0 +1,64 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV1
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SyntheticsAssertionJSONSchemaOperator Assertion operator to apply.
+type SyntheticsAssertionJSONSchemaOperator string
+
+// List of SyntheticsAssertionJSONSchemaOperator.
+const (
+ SYNTHETICSASSERTIONJSONSCHEMAOPERATOR_VALIDATES_JSON_SCHEMA SyntheticsAssertionJSONSchemaOperator = "validatesJSONSchema"
+)
+
+var allowedSyntheticsAssertionJSONSchemaOperatorEnumValues = []SyntheticsAssertionJSONSchemaOperator{
+ SYNTHETICSASSERTIONJSONSCHEMAOPERATOR_VALIDATES_JSON_SCHEMA,
+}
+
+// GetAllowedValues returns the list of possible values.
+func (v *SyntheticsAssertionJSONSchemaOperator) GetAllowedValues() []SyntheticsAssertionJSONSchemaOperator {
+ return allowedSyntheticsAssertionJSONSchemaOperatorEnumValues
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (v *SyntheticsAssertionJSONSchemaOperator) UnmarshalJSON(src []byte) error {
+ var value string
+ err := datadog.Unmarshal(src, &value)
+ if err != nil {
+ return err
+ }
+ *v = SyntheticsAssertionJSONSchemaOperator(value)
+ return nil
+}
+
+// NewSyntheticsAssertionJSONSchemaOperatorFromValue returns a pointer to a valid SyntheticsAssertionJSONSchemaOperator
+// for the value passed as argument, or an error if the value passed is not allowed by the enum.
+func NewSyntheticsAssertionJSONSchemaOperatorFromValue(v string) (*SyntheticsAssertionJSONSchemaOperator, error) {
+ ev := SyntheticsAssertionJSONSchemaOperator(v)
+ if ev.IsValid() {
+ return &ev, nil
+ }
+ return nil, fmt.Errorf("invalid value '%v' for SyntheticsAssertionJSONSchemaOperator: valid values are %v", v, allowedSyntheticsAssertionJSONSchemaOperatorEnumValues)
+}
+
+// IsValid returns true if the value is valid for the enum, false otherwise.
+func (v SyntheticsAssertionJSONSchemaOperator) IsValid() bool {
+ for _, existing := range allowedSyntheticsAssertionJSONSchemaOperatorEnumValues {
+ if existing == v {
+ return true
+ }
+ }
+ return false
+}
+
+// Ptr returns reference to SyntheticsAssertionJSONSchemaOperator value.
+func (v SyntheticsAssertionJSONSchemaOperator) Ptr() *SyntheticsAssertionJSONSchemaOperator {
+ return &v
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_target.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_target.go
new file mode 100644
index 0000000000..df7e1188ab
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_target.go
@@ -0,0 +1,185 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV1
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SyntheticsAssertionJSONSchemaTarget An assertion for the `validatesJSONSchema` operator.
+type SyntheticsAssertionJSONSchemaTarget struct {
+ // Assertion operator to apply.
+ Operator SyntheticsAssertionJSONSchemaOperator `json:"operator"`
+ // Composed target for `validatesJSONSchema` operator.
+ Target *SyntheticsAssertionJSONSchemaTargetTarget `json:"target,omitempty"`
+ // Type of the assertion.
+ Type SyntheticsAssertionType `json:"type"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSyntheticsAssertionJSONSchemaTarget instantiates a new SyntheticsAssertionJSONSchemaTarget object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSyntheticsAssertionJSONSchemaTarget(operator SyntheticsAssertionJSONSchemaOperator, typeVar SyntheticsAssertionType) *SyntheticsAssertionJSONSchemaTarget {
+ this := SyntheticsAssertionJSONSchemaTarget{}
+ this.Operator = operator
+ this.Type = typeVar
+ return &this
+}
+
+// NewSyntheticsAssertionJSONSchemaTargetWithDefaults instantiates a new SyntheticsAssertionJSONSchemaTarget object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSyntheticsAssertionJSONSchemaTargetWithDefaults() *SyntheticsAssertionJSONSchemaTarget {
+ this := SyntheticsAssertionJSONSchemaTarget{}
+ return &this
+}
+
+// GetOperator returns the Operator field value.
+func (o *SyntheticsAssertionJSONSchemaTarget) GetOperator() SyntheticsAssertionJSONSchemaOperator {
+ if o == nil {
+ var ret SyntheticsAssertionJSONSchemaOperator
+ return ret
+ }
+ return o.Operator
+}
+
+// GetOperatorOk returns a tuple with the Operator field value
+// and a boolean to check if the value has been set.
+func (o *SyntheticsAssertionJSONSchemaTarget) GetOperatorOk() (*SyntheticsAssertionJSONSchemaOperator, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Operator, true
+}
+
+// SetOperator sets field value.
+func (o *SyntheticsAssertionJSONSchemaTarget) SetOperator(v SyntheticsAssertionJSONSchemaOperator) {
+ o.Operator = v
+}
+
+// GetTarget returns the Target field value if set, zero value otherwise.
+func (o *SyntheticsAssertionJSONSchemaTarget) GetTarget() SyntheticsAssertionJSONSchemaTargetTarget {
+ if o == nil || o.Target == nil {
+ var ret SyntheticsAssertionJSONSchemaTargetTarget
+ return ret
+ }
+ return *o.Target
+}
+
+// GetTargetOk returns a tuple with the Target field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsAssertionJSONSchemaTarget) GetTargetOk() (*SyntheticsAssertionJSONSchemaTargetTarget, bool) {
+ if o == nil || o.Target == nil {
+ return nil, false
+ }
+ return o.Target, true
+}
+
+// HasTarget returns a boolean if a field has been set.
+func (o *SyntheticsAssertionJSONSchemaTarget) HasTarget() bool {
+ return o != nil && o.Target != nil
+}
+
+// SetTarget gets a reference to the given SyntheticsAssertionJSONSchemaTargetTarget and assigns it to the Target field.
+func (o *SyntheticsAssertionJSONSchemaTarget) SetTarget(v SyntheticsAssertionJSONSchemaTargetTarget) {
+ o.Target = &v
+}
+
+// GetType returns the Type field value.
+func (o *SyntheticsAssertionJSONSchemaTarget) GetType() SyntheticsAssertionType {
+ if o == nil {
+ var ret SyntheticsAssertionType
+ return ret
+ }
+ return o.Type
+}
+
+// GetTypeOk returns a tuple with the Type field value
+// and a boolean to check if the value has been set.
+func (o *SyntheticsAssertionJSONSchemaTarget) GetTypeOk() (*SyntheticsAssertionType, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Type, true
+}
+
+// SetType sets field value.
+func (o *SyntheticsAssertionJSONSchemaTarget) SetType(v SyntheticsAssertionType) {
+ o.Type = v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SyntheticsAssertionJSONSchemaTarget) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ toSerialize["operator"] = o.Operator
+ if o.Target != nil {
+ toSerialize["target"] = o.Target
+ }
+ toSerialize["type"] = o.Type
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SyntheticsAssertionJSONSchemaTarget) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Operator *SyntheticsAssertionJSONSchemaOperator `json:"operator"`
+ Target *SyntheticsAssertionJSONSchemaTargetTarget `json:"target,omitempty"`
+ Type *SyntheticsAssertionType `json:"type"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ if all.Operator == nil {
+ return fmt.Errorf("required field operator missing")
+ }
+ if all.Type == nil {
+ return fmt.Errorf("required field type missing")
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"operator", "target", "type"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if !all.Operator.IsValid() {
+ hasInvalidField = true
+ } else {
+ o.Operator = *all.Operator
+ }
+ if all.Target != nil && all.Target.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Target = all.Target
+ if !all.Type.IsValid() {
+ hasInvalidField = true
+ } else {
+ o.Type = *all.Type
+ }
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_target_target.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_target_target.go
new file mode 100644
index 0000000000..2bcdba9b56
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_assertion_json_schema_target_target.go
@@ -0,0 +1,147 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV1
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SyntheticsAssertionJSONSchemaTargetTarget Composed target for `validatesJSONSchema` operator.
+type SyntheticsAssertionJSONSchemaTargetTarget struct {
+ // The JSON Schema to assert.
+ JsonSchema *string `json:"jsonSchema,omitempty"`
+ // The JSON Schema meta-schema version used in the assertion.
+ MetaSchema *SyntheticsAssertionJSONSchemaMetaSchema `json:"metaSchema,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSyntheticsAssertionJSONSchemaTargetTarget instantiates a new SyntheticsAssertionJSONSchemaTargetTarget object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSyntheticsAssertionJSONSchemaTargetTarget() *SyntheticsAssertionJSONSchemaTargetTarget {
+ this := SyntheticsAssertionJSONSchemaTargetTarget{}
+ return &this
+}
+
+// NewSyntheticsAssertionJSONSchemaTargetTargetWithDefaults instantiates a new SyntheticsAssertionJSONSchemaTargetTarget object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSyntheticsAssertionJSONSchemaTargetTargetWithDefaults() *SyntheticsAssertionJSONSchemaTargetTarget {
+ this := SyntheticsAssertionJSONSchemaTargetTarget{}
+ return &this
+}
+
+// GetJsonSchema returns the JsonSchema field value if set, zero value otherwise.
+func (o *SyntheticsAssertionJSONSchemaTargetTarget) GetJsonSchema() string {
+ if o == nil || o.JsonSchema == nil {
+ var ret string
+ return ret
+ }
+ return *o.JsonSchema
+}
+
+// GetJsonSchemaOk returns a tuple with the JsonSchema field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsAssertionJSONSchemaTargetTarget) GetJsonSchemaOk() (*string, bool) {
+ if o == nil || o.JsonSchema == nil {
+ return nil, false
+ }
+ return o.JsonSchema, true
+}
+
+// HasJsonSchema returns a boolean if a field has been set.
+func (o *SyntheticsAssertionJSONSchemaTargetTarget) HasJsonSchema() bool {
+ return o != nil && o.JsonSchema != nil
+}
+
+// SetJsonSchema gets a reference to the given string and assigns it to the JsonSchema field.
+func (o *SyntheticsAssertionJSONSchemaTargetTarget) SetJsonSchema(v string) {
+ o.JsonSchema = &v
+}
+
+// GetMetaSchema returns the MetaSchema field value if set, zero value otherwise.
+func (o *SyntheticsAssertionJSONSchemaTargetTarget) GetMetaSchema() SyntheticsAssertionJSONSchemaMetaSchema {
+ if o == nil || o.MetaSchema == nil {
+ var ret SyntheticsAssertionJSONSchemaMetaSchema
+ return ret
+ }
+ return *o.MetaSchema
+}
+
+// GetMetaSchemaOk returns a tuple with the MetaSchema field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsAssertionJSONSchemaTargetTarget) GetMetaSchemaOk() (*SyntheticsAssertionJSONSchemaMetaSchema, bool) {
+ if o == nil || o.MetaSchema == nil {
+ return nil, false
+ }
+ return o.MetaSchema, true
+}
+
+// HasMetaSchema returns a boolean if a field has been set.
+func (o *SyntheticsAssertionJSONSchemaTargetTarget) HasMetaSchema() bool {
+ return o != nil && o.MetaSchema != nil
+}
+
+// SetMetaSchema gets a reference to the given SyntheticsAssertionJSONSchemaMetaSchema and assigns it to the MetaSchema field.
+func (o *SyntheticsAssertionJSONSchemaTargetTarget) SetMetaSchema(v SyntheticsAssertionJSONSchemaMetaSchema) {
+ o.MetaSchema = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SyntheticsAssertionJSONSchemaTargetTarget) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.JsonSchema != nil {
+ toSerialize["jsonSchema"] = o.JsonSchema
+ }
+ if o.MetaSchema != nil {
+ toSerialize["metaSchema"] = o.MetaSchema
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SyntheticsAssertionJSONSchemaTargetTarget) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ JsonSchema *string `json:"jsonSchema,omitempty"`
+ MetaSchema *SyntheticsAssertionJSONSchemaMetaSchema `json:"metaSchema,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"jsonSchema", "metaSchema"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ o.JsonSchema = all.JsonSchema
+ if all.MetaSchema != nil && !all.MetaSchema.IsValid() {
+ hasInvalidField = true
+ } else {
+ o.MetaSchema = all.MetaSchema
+ }
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
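
A sketch (not part of the patch) that wires the new JSON-schema assertion types together, using only symbols introduced in this diff; the assertion type value "body" is an assumption about the existing SyntheticsAssertionType enum, and the schema string is illustrative.

package main

import (
	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
)

func main() {
	tt := datadogV1.NewSyntheticsAssertionJSONSchemaTargetTarget()
	tt.SetJsonSchema(`{"type": "object", "required": ["id"]}`)
	tt.SetMetaSchema(datadogV1.SYNTHETICSASSERTIONJSONSCHEMAMETASCHEMA_DRAFT_07)

	target := datadogV1.NewSyntheticsAssertionJSONSchemaTarget(
		datadogV1.SYNTHETICSASSERTIONJSONSCHEMAOPERATOR_VALIDATES_JSON_SCHEMA,
		datadogV1.SyntheticsAssertionType("body"), // assumed type for body schema checks
	)
	target.SetTarget(*tt)

	assertion := datadogV1.SyntheticsAssertionJSONSchemaTargetAsSyntheticsAssertion(target)
	_ = assertion // would be appended to a test config's Assertions slice
}
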
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request.go
index b1663210ce..17f9089cd5 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request.go
@@ -32,6 +32,8 @@ type SyntheticsTestRequest struct {
DnsServer *string `json:"dnsServer,omitempty"`
// DNS server port to use for DNS tests.
DnsServerPort *int32 `json:"dnsServerPort,omitempty"`
+ // Files to be used as part of the request in the test.
+ Files []SyntheticsTestRequestBodyFile `json:"files,omitempty"`
// Specifies whether or not the request follows redirects.
FollowRedirects *bool `json:"follow_redirects,omitempty"`
// Headers to include when performing the test.
@@ -400,6 +402,34 @@ func (o *SyntheticsTestRequest) SetDnsServerPort(v int32) {
o.DnsServerPort = &v
}
+// GetFiles returns the Files field value if set, zero value otherwise.
+func (o *SyntheticsTestRequest) GetFiles() []SyntheticsTestRequestBodyFile {
+ if o == nil || o.Files == nil {
+ var ret []SyntheticsTestRequestBodyFile
+ return ret
+ }
+ return o.Files
+}
+
+// GetFilesOk returns a tuple with the Files field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsTestRequest) GetFilesOk() (*[]SyntheticsTestRequestBodyFile, bool) {
+ if o == nil || o.Files == nil {
+ return nil, false
+ }
+ return &o.Files, true
+}
+
+// HasFiles returns a boolean if a field has been set.
+func (o *SyntheticsTestRequest) HasFiles() bool {
+ return o != nil && o.Files != nil
+}
+
+// SetFiles gets a reference to the given []SyntheticsTestRequestBodyFile and assigns it to the Files field.
+func (o *SyntheticsTestRequest) SetFiles(v []SyntheticsTestRequestBodyFile) {
+ o.Files = v
+}
+
// GetFollowRedirects returns the FollowRedirects field value if set, zero value otherwise.
func (o *SyntheticsTestRequest) GetFollowRedirects() bool {
if o == nil || o.FollowRedirects == nil {
@@ -943,6 +973,9 @@ func (o SyntheticsTestRequest) MarshalJSON() ([]byte, error) {
if o.DnsServerPort != nil {
toSerialize["dnsServerPort"] = o.DnsServerPort
}
+ if o.Files != nil {
+ toSerialize["files"] = o.Files
+ }
if o.FollowRedirects != nil {
toSerialize["follow_redirects"] = o.FollowRedirects
}
@@ -1018,6 +1051,7 @@ func (o *SyntheticsTestRequest) UnmarshalJSON(bytes []byte) (err error) {
CompressedProtoFile *string `json:"compressedProtoFile,omitempty"`
DnsServer *string `json:"dnsServer,omitempty"`
DnsServerPort *int32 `json:"dnsServerPort,omitempty"`
+ Files []SyntheticsTestRequestBodyFile `json:"files,omitempty"`
FollowRedirects *bool `json:"follow_redirects,omitempty"`
Headers map[string]string `json:"headers,omitempty"`
Host *string `json:"host,omitempty"`
@@ -1042,7 +1076,7 @@ func (o *SyntheticsTestRequest) UnmarshalJSON(bytes []byte) (err error) {
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"allow_insecure", "basicAuth", "body", "bodyType", "callType", "certificate", "certificateDomains", "compressedJsonDescriptor", "compressedProtoFile", "dnsServer", "dnsServerPort", "follow_redirects", "headers", "host", "httpVersion", "message", "metadata", "method", "noSavingResponseBody", "numberOfPackets", "persistCookies", "port", "proxy", "query", "servername", "service", "shouldTrackHops", "timeout", "url"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"allow_insecure", "basicAuth", "body", "bodyType", "callType", "certificate", "certificateDomains", "compressedJsonDescriptor", "compressedProtoFile", "dnsServer", "dnsServerPort", "files", "follow_redirects", "headers", "host", "httpVersion", "message", "metadata", "method", "noSavingResponseBody", "numberOfPackets", "persistCookies", "port", "proxy", "query", "servername", "service", "shouldTrackHops", "timeout", "url"})
} else {
return err
}
@@ -1070,6 +1104,7 @@ func (o *SyntheticsTestRequest) UnmarshalJSON(bytes []byte) (err error) {
o.CompressedProtoFile = all.CompressedProtoFile
o.DnsServer = all.DnsServer
o.DnsServerPort = all.DnsServerPort
+ o.Files = all.Files
o.FollowRedirects = all.FollowRedirects
o.Headers = all.Headers
o.Host = all.Host
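
A sketch (not from the patch) of attaching a file to a Synthetics request through the new files field, using only accessors introduced in this diff; all field values are illustrative.

package main

import (
	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV1"
)

func main() {
	file := datadogV1.NewSyntheticsTestRequestBodyFile()
	file.SetName("payload.json")
	file.SetType("application/json")
	file.SetContent(`{"hello":"world"}`)
	file.SetSize(17) // byte length of the content above

	req := datadogV1.SyntheticsTestRequest{}
	req.SetFiles([]datadogV1.SyntheticsTestRequestBodyFile{*file})
}
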
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request_body_file.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request_body_file.go
new file mode 100644
index 0000000000..0eb0ff0c5e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request_body_file.go
@@ -0,0 +1,242 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV1
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SyntheticsTestRequestBodyFile Object describing a file to be used as part of the request in the test.
+type SyntheticsTestRequestBodyFile struct {
+ // Bucket key of the file.
+ BucketKey *string `json:"bucketKey,omitempty"`
+ // Content of the file.
+ Content *string `json:"content,omitempty"`
+ // Name of the file.
+ Name *string `json:"name,omitempty"`
+ // Size of the file.
+ Size *int64 `json:"size,omitempty"`
+ // Type of the file.
+ Type *string `json:"type,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSyntheticsTestRequestBodyFile instantiates a new SyntheticsTestRequestBodyFile object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSyntheticsTestRequestBodyFile() *SyntheticsTestRequestBodyFile {
+ this := SyntheticsTestRequestBodyFile{}
+ return &this
+}
+
+// NewSyntheticsTestRequestBodyFileWithDefaults instantiates a new SyntheticsTestRequestBodyFile object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSyntheticsTestRequestBodyFileWithDefaults() *SyntheticsTestRequestBodyFile {
+ this := SyntheticsTestRequestBodyFile{}
+ return &this
+}
+
+// GetBucketKey returns the BucketKey field value if set, zero value otherwise.
+func (o *SyntheticsTestRequestBodyFile) GetBucketKey() string {
+ if o == nil || o.BucketKey == nil {
+ var ret string
+ return ret
+ }
+ return *o.BucketKey
+}
+
+// GetBucketKeyOk returns a tuple with the BucketKey field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsTestRequestBodyFile) GetBucketKeyOk() (*string, bool) {
+ if o == nil || o.BucketKey == nil {
+ return nil, false
+ }
+ return o.BucketKey, true
+}
+
+// HasBucketKey returns a boolean if a field has been set.
+func (o *SyntheticsTestRequestBodyFile) HasBucketKey() bool {
+ return o != nil && o.BucketKey != nil
+}
+
+// SetBucketKey gets a reference to the given string and assigns it to the BucketKey field.
+func (o *SyntheticsTestRequestBodyFile) SetBucketKey(v string) {
+ o.BucketKey = &v
+}
+
+// GetContent returns the Content field value if set, zero value otherwise.
+func (o *SyntheticsTestRequestBodyFile) GetContent() string {
+ if o == nil || o.Content == nil {
+ var ret string
+ return ret
+ }
+ return *o.Content
+}
+
+// GetContentOk returns a tuple with the Content field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsTestRequestBodyFile) GetContentOk() (*string, bool) {
+ if o == nil || o.Content == nil {
+ return nil, false
+ }
+ return o.Content, true
+}
+
+// HasContent returns a boolean if a field has been set.
+func (o *SyntheticsTestRequestBodyFile) HasContent() bool {
+ return o != nil && o.Content != nil
+}
+
+// SetContent gets a reference to the given string and assigns it to the Content field.
+func (o *SyntheticsTestRequestBodyFile) SetContent(v string) {
+ o.Content = &v
+}
+
+// GetName returns the Name field value if set, zero value otherwise.
+func (o *SyntheticsTestRequestBodyFile) GetName() string {
+ if o == nil || o.Name == nil {
+ var ret string
+ return ret
+ }
+ return *o.Name
+}
+
+// GetNameOk returns a tuple with the Name field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsTestRequestBodyFile) GetNameOk() (*string, bool) {
+ if o == nil || o.Name == nil {
+ return nil, false
+ }
+ return o.Name, true
+}
+
+// HasName returns a boolean if a field has been set.
+func (o *SyntheticsTestRequestBodyFile) HasName() bool {
+ return o != nil && o.Name != nil
+}
+
+// SetName gets a reference to the given string and assigns it to the Name field.
+func (o *SyntheticsTestRequestBodyFile) SetName(v string) {
+ o.Name = &v
+}
+
+// GetSize returns the Size field value if set, zero value otherwise.
+func (o *SyntheticsTestRequestBodyFile) GetSize() int64 {
+ if o == nil || o.Size == nil {
+ var ret int64
+ return ret
+ }
+ return *o.Size
+}
+
+// GetSizeOk returns a tuple with the Size field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsTestRequestBodyFile) GetSizeOk() (*int64, bool) {
+ if o == nil || o.Size == nil {
+ return nil, false
+ }
+ return o.Size, true
+}
+
+// HasSize returns a boolean if a field has been set.
+func (o *SyntheticsTestRequestBodyFile) HasSize() bool {
+ return o != nil && o.Size != nil
+}
+
+// SetSize gets a reference to the given int64 and assigns it to the Size field.
+func (o *SyntheticsTestRequestBodyFile) SetSize(v int64) {
+ o.Size = &v
+}
+
+// GetType returns the Type field value if set, zero value otherwise.
+func (o *SyntheticsTestRequestBodyFile) GetType() string {
+ if o == nil || o.Type == nil {
+ var ret string
+ return ret
+ }
+ return *o.Type
+}
+
+// GetTypeOk returns a tuple with the Type field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SyntheticsTestRequestBodyFile) GetTypeOk() (*string, bool) {
+ if o == nil || o.Type == nil {
+ return nil, false
+ }
+ return o.Type, true
+}
+
+// HasType returns a boolean if a field has been set.
+func (o *SyntheticsTestRequestBodyFile) HasType() bool {
+ return o != nil && o.Type != nil
+}
+
+// SetType gets a reference to the given string and assigns it to the Type field.
+func (o *SyntheticsTestRequestBodyFile) SetType(v string) {
+ o.Type = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SyntheticsTestRequestBodyFile) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.BucketKey != nil {
+ toSerialize["bucketKey"] = o.BucketKey
+ }
+ if o.Content != nil {
+ toSerialize["content"] = o.Content
+ }
+ if o.Name != nil {
+ toSerialize["name"] = o.Name
+ }
+ if o.Size != nil {
+ toSerialize["size"] = o.Size
+ }
+ if o.Type != nil {
+ toSerialize["type"] = o.Type
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SyntheticsTestRequestBodyFile) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ BucketKey *string `json:"bucketKey,omitempty"`
+ Content *string `json:"content,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Size *int64 `json:"size,omitempty"`
+ Type *string `json:"type,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"bucketKey", "content", "name", "size", "type"})
+ } else {
+ return err
+ }
+ o.BucketKey = all.BucketKey
+ o.Content = all.Content
+ o.Name = all.Name
+ o.Size = all.Size
+ o.Type = all.Type
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request_body_type.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request_body_type.go
index bda04fb419..490a4b4956 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request_body_type.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_synthetics_test_request_body_type.go
@@ -21,6 +21,8 @@ const (
SYNTHETICSTESTREQUESTBODYTYPE_TEXT_HTML SyntheticsTestRequestBodyType = "text/html"
SYNTHETICSTESTREQUESTBODYTYPE_APPLICATION_X_WWW_FORM_URLENCODED SyntheticsTestRequestBodyType = "application/x-www-form-urlencoded"
SYNTHETICSTESTREQUESTBODYTYPE_GRAPHQL SyntheticsTestRequestBodyType = "graphql"
+ SYNTHETICSTESTREQUESTBODYTYPE_APPLICATION_OCTET_STREAM SyntheticsTestRequestBodyType = "application/octet-stream"
+ SYNTHETICSTESTREQUESTBODYTYPE_MULTIPART_FORM_DATA SyntheticsTestRequestBodyType = "multipart/form-data"
)
var allowedSyntheticsTestRequestBodyTypeEnumValues = []SyntheticsTestRequestBodyType{
@@ -30,6 +32,8 @@ var allowedSyntheticsTestRequestBodyTypeEnumValues = []SyntheticsTestRequestBody
SYNTHETICSTESTREQUESTBODYTYPE_TEXT_HTML,
SYNTHETICSTESTREQUESTBODYTYPE_APPLICATION_X_WWW_FORM_URLENCODED,
SYNTHETICSTESTREQUESTBODYTYPE_GRAPHQL,
+ SYNTHETICSTESTREQUESTBODYTYPE_APPLICATION_OCTET_STREAM,
+ SYNTHETICSTESTREQUESTBODYTYPE_MULTIPART_FORM_DATA,
}
// GetAllowedValues returns the list of possible values.
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_body.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_body.go
deleted file mode 100644
index 05eb659c3c..0000000000
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_body.go
+++ /dev/null
@@ -1,332 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2019-Present Datadog, Inc.
-
-package datadogV1
-
-import (
- "time"
-
- "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
-)
-
-// UsageAttributionBody Usage Summary by tag for a given organization.
-type UsageAttributionBody struct {
- // Datetime in ISO-8601 format, UTC, precise to month: [YYYY-MM].
- Month *time.Time `json:"month,omitempty"`
- // The name of the organization.
- OrgName *string `json:"org_name,omitempty"`
- // The organization public ID.
- PublicId *string `json:"public_id,omitempty"`
- // The source of the usage attribution tag configuration and the selected tags in the format `::://////`.
- TagConfigSource *string `json:"tag_config_source,omitempty"`
- // Tag keys and values.
- //
- // A `null` value here means that the requested tag breakdown cannot be applied because it does not match the [tags
- // configured for usage attribution](https://docs.datadoghq.com/account_management/billing/usage_attribution/#getting-started).
- // In this scenario the API returns the total usage, not broken down by tags.
- Tags map[string][]string `json:"tags,omitempty"`
- // Shows the most recent hour in the current month for all organizations for which all usages were calculated.
- UpdatedAt *string `json:"updated_at,omitempty"`
- // Fields in Usage Summary by tag(s).
- Values *UsageAttributionValues `json:"values,omitempty"`
- // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
- UnparsedObject map[string]interface{} `json:"-"`
- AdditionalProperties map[string]interface{}
-}
-
-// NewUsageAttributionBody instantiates a new UsageAttributionBody object.
-// This constructor will assign default values to properties that have it defined,
-// and makes sure properties required by API are set, but the set of arguments
-// will change when the set of required properties is changed.
-func NewUsageAttributionBody() *UsageAttributionBody {
- this := UsageAttributionBody{}
- return &this
-}
-
-// NewUsageAttributionBodyWithDefaults instantiates a new UsageAttributionBody object.
-// This constructor will only assign default values to properties that have it defined,
-// but it doesn't guarantee that properties required by API are set.
-func NewUsageAttributionBodyWithDefaults() *UsageAttributionBody {
- this := UsageAttributionBody{}
- return &this
-}
-
-// GetMonth returns the Month field value if set, zero value otherwise.
-func (o *UsageAttributionBody) GetMonth() time.Time {
- if o == nil || o.Month == nil {
- var ret time.Time
- return ret
- }
- return *o.Month
-}
-
-// GetMonthOk returns a tuple with the Month field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionBody) GetMonthOk() (*time.Time, bool) {
- if o == nil || o.Month == nil {
- return nil, false
- }
- return o.Month, true
-}
-
-// HasMonth returns a boolean if a field has been set.
-func (o *UsageAttributionBody) HasMonth() bool {
- return o != nil && o.Month != nil
-}
-
-// SetMonth gets a reference to the given time.Time and assigns it to the Month field.
-func (o *UsageAttributionBody) SetMonth(v time.Time) {
- o.Month = &v
-}
-
-// GetOrgName returns the OrgName field value if set, zero value otherwise.
-func (o *UsageAttributionBody) GetOrgName() string {
- if o == nil || o.OrgName == nil {
- var ret string
- return ret
- }
- return *o.OrgName
-}
-
-// GetOrgNameOk returns a tuple with the OrgName field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionBody) GetOrgNameOk() (*string, bool) {
- if o == nil || o.OrgName == nil {
- return nil, false
- }
- return o.OrgName, true
-}
-
-// HasOrgName returns a boolean if a field has been set.
-func (o *UsageAttributionBody) HasOrgName() bool {
- return o != nil && o.OrgName != nil
-}
-
-// SetOrgName gets a reference to the given string and assigns it to the OrgName field.
-func (o *UsageAttributionBody) SetOrgName(v string) {
- o.OrgName = &v
-}
-
-// GetPublicId returns the PublicId field value if set, zero value otherwise.
-func (o *UsageAttributionBody) GetPublicId() string {
- if o == nil || o.PublicId == nil {
- var ret string
- return ret
- }
- return *o.PublicId
-}
-
-// GetPublicIdOk returns a tuple with the PublicId field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionBody) GetPublicIdOk() (*string, bool) {
- if o == nil || o.PublicId == nil {
- return nil, false
- }
- return o.PublicId, true
-}
-
-// HasPublicId returns a boolean if a field has been set.
-func (o *UsageAttributionBody) HasPublicId() bool {
- return o != nil && o.PublicId != nil
-}
-
-// SetPublicId gets a reference to the given string and assigns it to the PublicId field.
-func (o *UsageAttributionBody) SetPublicId(v string) {
- o.PublicId = &v
-}
-
-// GetTagConfigSource returns the TagConfigSource field value if set, zero value otherwise.
-func (o *UsageAttributionBody) GetTagConfigSource() string {
- if o == nil || o.TagConfigSource == nil {
- var ret string
- return ret
- }
- return *o.TagConfigSource
-}
-
-// GetTagConfigSourceOk returns a tuple with the TagConfigSource field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionBody) GetTagConfigSourceOk() (*string, bool) {
- if o == nil || o.TagConfigSource == nil {
- return nil, false
- }
- return o.TagConfigSource, true
-}
-
-// HasTagConfigSource returns a boolean if a field has been set.
-func (o *UsageAttributionBody) HasTagConfigSource() bool {
- return o != nil && o.TagConfigSource != nil
-}
-
-// SetTagConfigSource gets a reference to the given string and assigns it to the TagConfigSource field.
-func (o *UsageAttributionBody) SetTagConfigSource(v string) {
- o.TagConfigSource = &v
-}
-
-// GetTags returns the Tags field value if set, zero value otherwise (both if not set or set to explicit null).
-func (o *UsageAttributionBody) GetTags() map[string][]string {
- if o == nil {
- var ret map[string][]string
- return ret
- }
- return o.Tags
-}
-
-// GetTagsOk returns a tuple with the Tags field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-// NOTE: If the value is an explicit nil, `nil, true` will be returned.
-func (o *UsageAttributionBody) GetTagsOk() (*map[string][]string, bool) {
- if o == nil || o.Tags == nil {
- return nil, false
- }
- return &o.Tags, true
-}
-
-// HasTags returns a boolean if a field has been set.
-func (o *UsageAttributionBody) HasTags() bool {
- return o != nil && o.Tags != nil
-}
-
-// SetTags gets a reference to the given map[string][]string and assigns it to the Tags field.
-func (o *UsageAttributionBody) SetTags(v map[string][]string) {
- o.Tags = v
-}
-
-// GetUpdatedAt returns the UpdatedAt field value if set, zero value otherwise.
-func (o *UsageAttributionBody) GetUpdatedAt() string {
- if o == nil || o.UpdatedAt == nil {
- var ret string
- return ret
- }
- return *o.UpdatedAt
-}
-
-// GetUpdatedAtOk returns a tuple with the UpdatedAt field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionBody) GetUpdatedAtOk() (*string, bool) {
- if o == nil || o.UpdatedAt == nil {
- return nil, false
- }
- return o.UpdatedAt, true
-}
-
-// HasUpdatedAt returns a boolean if a field has been set.
-func (o *UsageAttributionBody) HasUpdatedAt() bool {
- return o != nil && o.UpdatedAt != nil
-}
-
-// SetUpdatedAt gets a reference to the given string and assigns it to the UpdatedAt field.
-func (o *UsageAttributionBody) SetUpdatedAt(v string) {
- o.UpdatedAt = &v
-}
-
-// GetValues returns the Values field value if set, zero value otherwise.
-func (o *UsageAttributionBody) GetValues() UsageAttributionValues {
- if o == nil || o.Values == nil {
- var ret UsageAttributionValues
- return ret
- }
- return *o.Values
-}
-
-// GetValuesOk returns a tuple with the Values field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionBody) GetValuesOk() (*UsageAttributionValues, bool) {
- if o == nil || o.Values == nil {
- return nil, false
- }
- return o.Values, true
-}
-
-// HasValues returns a boolean if a field has been set.
-func (o *UsageAttributionBody) HasValues() bool {
- return o != nil && o.Values != nil
-}
-
-// SetValues gets a reference to the given UsageAttributionValues and assigns it to the Values field.
-func (o *UsageAttributionBody) SetValues(v UsageAttributionValues) {
- o.Values = &v
-}
-
-// MarshalJSON serializes the struct using spec logic.
-func (o UsageAttributionBody) MarshalJSON() ([]byte, error) {
- toSerialize := map[string]interface{}{}
- if o.UnparsedObject != nil {
- return datadog.Marshal(o.UnparsedObject)
- }
- if o.Month != nil {
- if o.Month.Nanosecond() == 0 {
- toSerialize["month"] = o.Month.Format("2006-01-02T15:04:05Z07:00")
- } else {
- toSerialize["month"] = o.Month.Format("2006-01-02T15:04:05.000Z07:00")
- }
- }
- if o.OrgName != nil {
- toSerialize["org_name"] = o.OrgName
- }
- if o.PublicId != nil {
- toSerialize["public_id"] = o.PublicId
- }
- if o.TagConfigSource != nil {
- toSerialize["tag_config_source"] = o.TagConfigSource
- }
- if o.Tags != nil {
- toSerialize["tags"] = o.Tags
- }
- if o.UpdatedAt != nil {
- toSerialize["updated_at"] = o.UpdatedAt
- }
- if o.Values != nil {
- toSerialize["values"] = o.Values
- }
-
- for key, value := range o.AdditionalProperties {
- toSerialize[key] = value
- }
- return datadog.Marshal(toSerialize)
-}
-
-// UnmarshalJSON deserializes the given payload.
-func (o *UsageAttributionBody) UnmarshalJSON(bytes []byte) (err error) {
- all := struct {
- Month *time.Time `json:"month,omitempty"`
- OrgName *string `json:"org_name,omitempty"`
- PublicId *string `json:"public_id,omitempty"`
- TagConfigSource *string `json:"tag_config_source,omitempty"`
- Tags map[string][]string `json:"tags,omitempty"`
- UpdatedAt *string `json:"updated_at,omitempty"`
- Values *UsageAttributionValues `json:"values,omitempty"`
- }{}
- if err = datadog.Unmarshal(bytes, &all); err != nil {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
- }
- additionalProperties := make(map[string]interface{})
- if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"month", "org_name", "public_id", "tag_config_source", "tags", "updated_at", "values"})
- } else {
- return err
- }
-
- hasInvalidField := false
- o.Month = all.Month
- o.OrgName = all.OrgName
- o.PublicId = all.PublicId
- o.TagConfigSource = all.TagConfigSource
- o.Tags = all.Tags
- o.UpdatedAt = all.UpdatedAt
- if all.Values != nil && all.Values.UnparsedObject != nil && o.UnparsedObject == nil {
- hasInvalidField = true
- }
- o.Values = all.Values
-
- if len(additionalProperties) > 0 {
- o.AdditionalProperties = additionalProperties
- }
-
- if hasInvalidField {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
- }
-
- return nil
-}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_metadata.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_metadata.go
deleted file mode 100644
index 4f064f20db..0000000000
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_metadata.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2019-Present Datadog, Inc.
-
-package datadogV1
-
-import (
- "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
-)
-
-// UsageAttributionMetadata The object containing document metadata.
-type UsageAttributionMetadata struct {
- // An array of available aggregates.
- Aggregates []UsageAttributionAggregatesBody `json:"aggregates,omitempty"`
- // The metadata for the current pagination.
- Pagination *UsageAttributionPagination `json:"pagination,omitempty"`
- // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
- UnparsedObject map[string]interface{} `json:"-"`
- AdditionalProperties map[string]interface{}
-}
-
-// NewUsageAttributionMetadata instantiates a new UsageAttributionMetadata object.
-// This constructor will assign default values to properties that have it defined,
-// and makes sure properties required by API are set, but the set of arguments
-// will change when the set of required properties is changed.
-func NewUsageAttributionMetadata() *UsageAttributionMetadata {
- this := UsageAttributionMetadata{}
- return &this
-}
-
-// NewUsageAttributionMetadataWithDefaults instantiates a new UsageAttributionMetadata object.
-// This constructor will only assign default values to properties that have it defined,
-// but it doesn't guarantee that properties required by API are set.
-func NewUsageAttributionMetadataWithDefaults() *UsageAttributionMetadata {
- this := UsageAttributionMetadata{}
- return &this
-}
-
-// GetAggregates returns the Aggregates field value if set, zero value otherwise.
-func (o *UsageAttributionMetadata) GetAggregates() []UsageAttributionAggregatesBody {
- if o == nil || o.Aggregates == nil {
- var ret []UsageAttributionAggregatesBody
- return ret
- }
- return o.Aggregates
-}
-
-// GetAggregatesOk returns a tuple with the Aggregates field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionMetadata) GetAggregatesOk() (*[]UsageAttributionAggregatesBody, bool) {
- if o == nil || o.Aggregates == nil {
- return nil, false
- }
- return &o.Aggregates, true
-}
-
-// HasAggregates returns a boolean if a field has been set.
-func (o *UsageAttributionMetadata) HasAggregates() bool {
- return o != nil && o.Aggregates != nil
-}
-
-// SetAggregates gets a reference to the given []UsageAttributionAggregatesBody and assigns it to the Aggregates field.
-func (o *UsageAttributionMetadata) SetAggregates(v []UsageAttributionAggregatesBody) {
- o.Aggregates = v
-}
-
-// GetPagination returns the Pagination field value if set, zero value otherwise.
-func (o *UsageAttributionMetadata) GetPagination() UsageAttributionPagination {
- if o == nil || o.Pagination == nil {
- var ret UsageAttributionPagination
- return ret
- }
- return *o.Pagination
-}
-
-// GetPaginationOk returns a tuple with the Pagination field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionMetadata) GetPaginationOk() (*UsageAttributionPagination, bool) {
- if o == nil || o.Pagination == nil {
- return nil, false
- }
- return o.Pagination, true
-}
-
-// HasPagination returns a boolean if a field has been set.
-func (o *UsageAttributionMetadata) HasPagination() bool {
- return o != nil && o.Pagination != nil
-}
-
-// SetPagination gets a reference to the given UsageAttributionPagination and assigns it to the Pagination field.
-func (o *UsageAttributionMetadata) SetPagination(v UsageAttributionPagination) {
- o.Pagination = &v
-}
-
-// MarshalJSON serializes the struct using spec logic.
-func (o UsageAttributionMetadata) MarshalJSON() ([]byte, error) {
- toSerialize := map[string]interface{}{}
- if o.UnparsedObject != nil {
- return datadog.Marshal(o.UnparsedObject)
- }
- if o.Aggregates != nil {
- toSerialize["aggregates"] = o.Aggregates
- }
- if o.Pagination != nil {
- toSerialize["pagination"] = o.Pagination
- }
-
- for key, value := range o.AdditionalProperties {
- toSerialize[key] = value
- }
- return datadog.Marshal(toSerialize)
-}
-
-// UnmarshalJSON deserializes the given payload.
-func (o *UsageAttributionMetadata) UnmarshalJSON(bytes []byte) (err error) {
- all := struct {
- Aggregates []UsageAttributionAggregatesBody `json:"aggregates,omitempty"`
- Pagination *UsageAttributionPagination `json:"pagination,omitempty"`
- }{}
- if err = datadog.Unmarshal(bytes, &all); err != nil {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
- }
- additionalProperties := make(map[string]interface{})
- if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"aggregates", "pagination"})
- } else {
- return err
- }
-
- hasInvalidField := false
- o.Aggregates = all.Aggregates
- if all.Pagination != nil && all.Pagination.UnparsedObject != nil && o.UnparsedObject == nil {
- hasInvalidField = true
- }
- o.Pagination = all.Pagination
-
- if len(additionalProperties) > 0 {
- o.AdditionalProperties = additionalProperties
- }
-
- if hasInvalidField {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
- }
-
- return nil
-}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_pagination.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_pagination.go
deleted file mode 100644
index 708b1a1aee..0000000000
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_pagination.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2019-Present Datadog, Inc.
-
-package datadogV1
-
-import (
- "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
-)
-
-// UsageAttributionPagination The metadata for the current pagination.
-type UsageAttributionPagination struct {
- // Maximum amount of records to be returned.
- Limit *int64 `json:"limit,omitempty"`
- // Records to be skipped before beginning to return.
- Offset *int64 `json:"offset,omitempty"`
- // Direction to sort by.
- SortDirection *string `json:"sort_direction,omitempty"`
- // Field to sort by.
- SortName *string `json:"sort_name,omitempty"`
- // Total number of records.
- TotalNumberOfRecords *int64 `json:"total_number_of_records,omitempty"`
- // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
- UnparsedObject map[string]interface{} `json:"-"`
- AdditionalProperties map[string]interface{}
-}
-
-// NewUsageAttributionPagination instantiates a new UsageAttributionPagination object.
-// This constructor will assign default values to properties that have it defined,
-// and makes sure properties required by API are set, but the set of arguments
-// will change when the set of required properties is changed.
-func NewUsageAttributionPagination() *UsageAttributionPagination {
- this := UsageAttributionPagination{}
- return &this
-}
-
-// NewUsageAttributionPaginationWithDefaults instantiates a new UsageAttributionPagination object.
-// This constructor will only assign default values to properties that have it defined,
-// but it doesn't guarantee that properties required by API are set.
-func NewUsageAttributionPaginationWithDefaults() *UsageAttributionPagination {
- this := UsageAttributionPagination{}
- return &this
-}
-
-// GetLimit returns the Limit field value if set, zero value otherwise.
-func (o *UsageAttributionPagination) GetLimit() int64 {
- if o == nil || o.Limit == nil {
- var ret int64
- return ret
- }
- return *o.Limit
-}
-
-// GetLimitOk returns a tuple with the Limit field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionPagination) GetLimitOk() (*int64, bool) {
- if o == nil || o.Limit == nil {
- return nil, false
- }
- return o.Limit, true
-}
-
-// HasLimit returns a boolean if a field has been set.
-func (o *UsageAttributionPagination) HasLimit() bool {
- return o != nil && o.Limit != nil
-}
-
-// SetLimit gets a reference to the given int64 and assigns it to the Limit field.
-func (o *UsageAttributionPagination) SetLimit(v int64) {
- o.Limit = &v
-}
-
-// GetOffset returns the Offset field value if set, zero value otherwise.
-func (o *UsageAttributionPagination) GetOffset() int64 {
- if o == nil || o.Offset == nil {
- var ret int64
- return ret
- }
- return *o.Offset
-}
-
-// GetOffsetOk returns a tuple with the Offset field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionPagination) GetOffsetOk() (*int64, bool) {
- if o == nil || o.Offset == nil {
- return nil, false
- }
- return o.Offset, true
-}
-
-// HasOffset returns a boolean if a field has been set.
-func (o *UsageAttributionPagination) HasOffset() bool {
- return o != nil && o.Offset != nil
-}
-
-// SetOffset gets a reference to the given int64 and assigns it to the Offset field.
-func (o *UsageAttributionPagination) SetOffset(v int64) {
- o.Offset = &v
-}
-
-// GetSortDirection returns the SortDirection field value if set, zero value otherwise.
-func (o *UsageAttributionPagination) GetSortDirection() string {
- if o == nil || o.SortDirection == nil {
- var ret string
- return ret
- }
- return *o.SortDirection
-}
-
-// GetSortDirectionOk returns a tuple with the SortDirection field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionPagination) GetSortDirectionOk() (*string, bool) {
- if o == nil || o.SortDirection == nil {
- return nil, false
- }
- return o.SortDirection, true
-}
-
-// HasSortDirection returns a boolean if a field has been set.
-func (o *UsageAttributionPagination) HasSortDirection() bool {
- return o != nil && o.SortDirection != nil
-}
-
-// SetSortDirection gets a reference to the given string and assigns it to the SortDirection field.
-func (o *UsageAttributionPagination) SetSortDirection(v string) {
- o.SortDirection = &v
-}
-
-// GetSortName returns the SortName field value if set, zero value otherwise.
-func (o *UsageAttributionPagination) GetSortName() string {
- if o == nil || o.SortName == nil {
- var ret string
- return ret
- }
- return *o.SortName
-}
-
-// GetSortNameOk returns a tuple with the SortName field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionPagination) GetSortNameOk() (*string, bool) {
- if o == nil || o.SortName == nil {
- return nil, false
- }
- return o.SortName, true
-}
-
-// HasSortName returns a boolean if a field has been set.
-func (o *UsageAttributionPagination) HasSortName() bool {
- return o != nil && o.SortName != nil
-}
-
-// SetSortName gets a reference to the given string and assigns it to the SortName field.
-func (o *UsageAttributionPagination) SetSortName(v string) {
- o.SortName = &v
-}
-
-// GetTotalNumberOfRecords returns the TotalNumberOfRecords field value if set, zero value otherwise.
-func (o *UsageAttributionPagination) GetTotalNumberOfRecords() int64 {
- if o == nil || o.TotalNumberOfRecords == nil {
- var ret int64
- return ret
- }
- return *o.TotalNumberOfRecords
-}
-
-// GetTotalNumberOfRecordsOk returns a tuple with the TotalNumberOfRecords field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionPagination) GetTotalNumberOfRecordsOk() (*int64, bool) {
- if o == nil || o.TotalNumberOfRecords == nil {
- return nil, false
- }
- return o.TotalNumberOfRecords, true
-}
-
-// HasTotalNumberOfRecords returns a boolean if a field has been set.
-func (o *UsageAttributionPagination) HasTotalNumberOfRecords() bool {
- return o != nil && o.TotalNumberOfRecords != nil
-}
-
-// SetTotalNumberOfRecords gets a reference to the given int64 and assigns it to the TotalNumberOfRecords field.
-func (o *UsageAttributionPagination) SetTotalNumberOfRecords(v int64) {
- o.TotalNumberOfRecords = &v
-}
-
-// MarshalJSON serializes the struct using spec logic.
-func (o UsageAttributionPagination) MarshalJSON() ([]byte, error) {
- toSerialize := map[string]interface{}{}
- if o.UnparsedObject != nil {
- return datadog.Marshal(o.UnparsedObject)
- }
- if o.Limit != nil {
- toSerialize["limit"] = o.Limit
- }
- if o.Offset != nil {
- toSerialize["offset"] = o.Offset
- }
- if o.SortDirection != nil {
- toSerialize["sort_direction"] = o.SortDirection
- }
- if o.SortName != nil {
- toSerialize["sort_name"] = o.SortName
- }
- if o.TotalNumberOfRecords != nil {
- toSerialize["total_number_of_records"] = o.TotalNumberOfRecords
- }
-
- for key, value := range o.AdditionalProperties {
- toSerialize[key] = value
- }
- return datadog.Marshal(toSerialize)
-}
-
-// UnmarshalJSON deserializes the given payload.
-func (o *UsageAttributionPagination) UnmarshalJSON(bytes []byte) (err error) {
- all := struct {
- Limit *int64 `json:"limit,omitempty"`
- Offset *int64 `json:"offset,omitempty"`
- SortDirection *string `json:"sort_direction,omitempty"`
- SortName *string `json:"sort_name,omitempty"`
- TotalNumberOfRecords *int64 `json:"total_number_of_records,omitempty"`
- }{}
- if err = datadog.Unmarshal(bytes, &all); err != nil {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
- }
- additionalProperties := make(map[string]interface{})
- if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"limit", "offset", "sort_direction", "sort_name", "total_number_of_records"})
- } else {
- return err
- }
- o.Limit = all.Limit
- o.Offset = all.Offset
- o.SortDirection = all.SortDirection
- o.SortName = all.SortName
- o.TotalNumberOfRecords = all.TotalNumberOfRecords
-
- if len(additionalProperties) > 0 {
- o.AdditionalProperties = additionalProperties
- }
-
- return nil
-}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_response.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_response.go
deleted file mode 100644
index 677006e346..0000000000
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_response.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2019-Present Datadog, Inc.
-
-package datadogV1
-
-import (
- "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
-)
-
-// UsageAttributionResponse Response containing the Usage Summary by tag(s).
-type UsageAttributionResponse struct {
- // The object containing document metadata.
- Metadata *UsageAttributionMetadata `json:"metadata,omitempty"`
- // Get usage summary by tag(s).
- Usage []UsageAttributionBody `json:"usage,omitempty"`
- // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
- UnparsedObject map[string]interface{} `json:"-"`
- AdditionalProperties map[string]interface{}
-}
-
-// NewUsageAttributionResponse instantiates a new UsageAttributionResponse object.
-// This constructor will assign default values to properties that have it defined,
-// and makes sure properties required by API are set, but the set of arguments
-// will change when the set of required properties is changed.
-func NewUsageAttributionResponse() *UsageAttributionResponse {
- this := UsageAttributionResponse{}
- return &this
-}
-
-// NewUsageAttributionResponseWithDefaults instantiates a new UsageAttributionResponse object.
-// This constructor will only assign default values to properties that have it defined,
-// but it doesn't guarantee that properties required by API are set.
-func NewUsageAttributionResponseWithDefaults() *UsageAttributionResponse {
- this := UsageAttributionResponse{}
- return &this
-}
-
-// GetMetadata returns the Metadata field value if set, zero value otherwise.
-func (o *UsageAttributionResponse) GetMetadata() UsageAttributionMetadata {
- if o == nil || o.Metadata == nil {
- var ret UsageAttributionMetadata
- return ret
- }
- return *o.Metadata
-}
-
-// GetMetadataOk returns a tuple with the Metadata field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionResponse) GetMetadataOk() (*UsageAttributionMetadata, bool) {
- if o == nil || o.Metadata == nil {
- return nil, false
- }
- return o.Metadata, true
-}
-
-// HasMetadata returns a boolean if a field has been set.
-func (o *UsageAttributionResponse) HasMetadata() bool {
- return o != nil && o.Metadata != nil
-}
-
-// SetMetadata gets a reference to the given UsageAttributionMetadata and assigns it to the Metadata field.
-func (o *UsageAttributionResponse) SetMetadata(v UsageAttributionMetadata) {
- o.Metadata = &v
-}
-
-// GetUsage returns the Usage field value if set, zero value otherwise.
-func (o *UsageAttributionResponse) GetUsage() []UsageAttributionBody {
- if o == nil || o.Usage == nil {
- var ret []UsageAttributionBody
- return ret
- }
- return o.Usage
-}
-
-// GetUsageOk returns a tuple with the Usage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionResponse) GetUsageOk() (*[]UsageAttributionBody, bool) {
- if o == nil || o.Usage == nil {
- return nil, false
- }
- return &o.Usage, true
-}
-
-// HasUsage returns a boolean if a field has been set.
-func (o *UsageAttributionResponse) HasUsage() bool {
- return o != nil && o.Usage != nil
-}
-
-// SetUsage gets a reference to the given []UsageAttributionBody and assigns it to the Usage field.
-func (o *UsageAttributionResponse) SetUsage(v []UsageAttributionBody) {
- o.Usage = v
-}
-
-// MarshalJSON serializes the struct using spec logic.
-func (o UsageAttributionResponse) MarshalJSON() ([]byte, error) {
- toSerialize := map[string]interface{}{}
- if o.UnparsedObject != nil {
- return datadog.Marshal(o.UnparsedObject)
- }
- if o.Metadata != nil {
- toSerialize["metadata"] = o.Metadata
- }
- if o.Usage != nil {
- toSerialize["usage"] = o.Usage
- }
-
- for key, value := range o.AdditionalProperties {
- toSerialize[key] = value
- }
- return datadog.Marshal(toSerialize)
-}
-
-// UnmarshalJSON deserializes the given payload.
-func (o *UsageAttributionResponse) UnmarshalJSON(bytes []byte) (err error) {
- all := struct {
- Metadata *UsageAttributionMetadata `json:"metadata,omitempty"`
- Usage []UsageAttributionBody `json:"usage,omitempty"`
- }{}
- if err = datadog.Unmarshal(bytes, &all); err != nil {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
- }
- additionalProperties := make(map[string]interface{})
- if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"metadata", "usage"})
- } else {
- return err
- }
-
- hasInvalidField := false
- if all.Metadata != nil && all.Metadata.UnparsedObject != nil && o.UnparsedObject == nil {
- hasInvalidField = true
- }
- o.Metadata = all.Metadata
- o.Usage = all.Usage
-
- if len(additionalProperties) > 0 {
- o.AdditionalProperties = additionalProperties
- }
-
- if hasInvalidField {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
- }
-
- return nil
-}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_sort.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_sort.go
deleted file mode 100644
index 51e769821a..0000000000
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_sort.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2019-Present Datadog, Inc.
-
-package datadogV1
-
-import (
- "fmt"
-
- "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
-)
-
-// UsageAttributionSort The field to sort by.
-type UsageAttributionSort string
-
-// List of UsageAttributionSort.
-const (
- USAGEATTRIBUTIONSORT_API_PERCENTAGE UsageAttributionSort = "api_percentage"
- USAGEATTRIBUTIONSORT_SNMP_USAGE UsageAttributionSort = "snmp_usage"
- USAGEATTRIBUTIONSORT_APM_HOST_USAGE UsageAttributionSort = "apm_host_usage"
- USAGEATTRIBUTIONSORT_API_USAGE UsageAttributionSort = "api_usage"
- USAGEATTRIBUTIONSORT_APPSEC_USAGE UsageAttributionSort = "appsec_usage"
- USAGEATTRIBUTIONSORT_APPSEC_PERCENTAGE UsageAttributionSort = "appsec_percentage"
- USAGEATTRIBUTIONSORT_CONTAINER_USAGE UsageAttributionSort = "container_usage"
- USAGEATTRIBUTIONSORT_CUSTOM_TIMESERIES_PERCENTAGE UsageAttributionSort = "custom_timeseries_percentage"
- USAGEATTRIBUTIONSORT_CONTAINER_PERCENTAGE UsageAttributionSort = "container_percentage"
- USAGEATTRIBUTIONSORT_APM_HOST_PERCENTAGE UsageAttributionSort = "apm_host_percentage"
- USAGEATTRIBUTIONSORT_NPM_HOST_PERCENTAGE UsageAttributionSort = "npm_host_percentage"
- USAGEATTRIBUTIONSORT_BROWSER_PERCENTAGE UsageAttributionSort = "browser_percentage"
- USAGEATTRIBUTIONSORT_BROWSER_USAGE UsageAttributionSort = "browser_usage"
- USAGEATTRIBUTIONSORT_INFRA_HOST_PERCENTAGE UsageAttributionSort = "infra_host_percentage"
- USAGEATTRIBUTIONSORT_SNMP_PERCENTAGE UsageAttributionSort = "snmp_percentage"
- USAGEATTRIBUTIONSORT_NPM_HOST_USAGE UsageAttributionSort = "npm_host_usage"
- USAGEATTRIBUTIONSORT_INFRA_HOST_USAGE UsageAttributionSort = "infra_host_usage"
- USAGEATTRIBUTIONSORT_CUSTOM_TIMESERIES_USAGE UsageAttributionSort = "custom_timeseries_usage"
- USAGEATTRIBUTIONSORT_LAMBDA_FUNCTIONS_USAGE UsageAttributionSort = "lambda_functions_usage"
- USAGEATTRIBUTIONSORT_LAMBDA_FUNCTIONS_PERCENTAGE UsageAttributionSort = "lambda_functions_percentage"
- USAGEATTRIBUTIONSORT_LAMBDA_INVOCATIONS_USAGE UsageAttributionSort = "lambda_invocations_usage"
- USAGEATTRIBUTIONSORT_LAMBDA_INVOCATIONS_PERCENTAGE UsageAttributionSort = "lambda_invocations_percentage"
- USAGEATTRIBUTIONSORT_ESTIMATED_INDEXED_LOGS_USAGE UsageAttributionSort = "estimated_indexed_logs_usage"
- USAGEATTRIBUTIONSORT_ESTIMATED_INDEXED_LOGS_PERCENTAGE UsageAttributionSort = "estimated_indexed_logs_percentage"
- USAGEATTRIBUTIONSORT_ESTIMATED_INGESTED_LOGS_USAGE UsageAttributionSort = "estimated_ingested_logs_usage"
- USAGEATTRIBUTIONSORT_ESTIMATED_INGESTED_LOGS_PERCENTAGE UsageAttributionSort = "estimated_ingested_logs_percentage"
- USAGEATTRIBUTIONSORT_ESTIMATED_INDEXED_SPANS_USAGE UsageAttributionSort = "estimated_indexed_spans_usage"
- USAGEATTRIBUTIONSORT_ESTIMATED_INDEXED_SPANS_PERCENTAGE UsageAttributionSort = "estimated_indexed_spans_percentage"
- USAGEATTRIBUTIONSORT_ESTIMATED_INGESTED_SPANS_USAGE UsageAttributionSort = "estimated_ingested_spans_usage"
- USAGEATTRIBUTIONSORT_ESTIMATED_INGESTED_SPANS_PERCENTAGE UsageAttributionSort = "estimated_ingested_spans_percentage"
- USAGEATTRIBUTIONSORT_APM_FARGATE_USAGE UsageAttributionSort = "apm_fargate_usage"
- USAGEATTRIBUTIONSORT_APM_FARGATE_PERCENTAGE UsageAttributionSort = "apm_fargate_percentage"
- USAGEATTRIBUTIONSORT_APPSEC_FARGATE_USAGE UsageAttributionSort = "appsec_fargate_usage"
- USAGEATTRIBUTIONSORT_APPSEC_FARGATE_PERCENTAGE UsageAttributionSort = "appsec_fargate_percentage"
- USAGEATTRIBUTIONSORT_ESTIMATED_RUM_USAGE_ATTRIBUTION_USAGE UsageAttributionSort = "estimated_rum_usage_attribution_usage"
- USAGEATTRIBUTIONSORT_ESTIMATED_RUM_USAGE_ATTRIBUTION_PERCENTAGE UsageAttributionSort = "estimated_rum_usage_attribution_percentage"
- USAGEATTRIBUTIONSORT_ASM_SERVERLESS_TRACED_INVOCATIONS_USAGE UsageAttributionSort = "asm_serverless_traced_invocations_usage"
- USAGEATTRIBUTIONSORT_ASM_SERVERLESS_TRACED_INVOCATIONS_PERCENTAGE UsageAttributionSort = "asm_serverless_traced_invocations_percentage"
-)
-
-var allowedUsageAttributionSortEnumValues = []UsageAttributionSort{
- USAGEATTRIBUTIONSORT_API_PERCENTAGE,
- USAGEATTRIBUTIONSORT_SNMP_USAGE,
- USAGEATTRIBUTIONSORT_APM_HOST_USAGE,
- USAGEATTRIBUTIONSORT_API_USAGE,
- USAGEATTRIBUTIONSORT_APPSEC_USAGE,
- USAGEATTRIBUTIONSORT_APPSEC_PERCENTAGE,
- USAGEATTRIBUTIONSORT_CONTAINER_USAGE,
- USAGEATTRIBUTIONSORT_CUSTOM_TIMESERIES_PERCENTAGE,
- USAGEATTRIBUTIONSORT_CONTAINER_PERCENTAGE,
- USAGEATTRIBUTIONSORT_APM_HOST_PERCENTAGE,
- USAGEATTRIBUTIONSORT_NPM_HOST_PERCENTAGE,
- USAGEATTRIBUTIONSORT_BROWSER_PERCENTAGE,
- USAGEATTRIBUTIONSORT_BROWSER_USAGE,
- USAGEATTRIBUTIONSORT_INFRA_HOST_PERCENTAGE,
- USAGEATTRIBUTIONSORT_SNMP_PERCENTAGE,
- USAGEATTRIBUTIONSORT_NPM_HOST_USAGE,
- USAGEATTRIBUTIONSORT_INFRA_HOST_USAGE,
- USAGEATTRIBUTIONSORT_CUSTOM_TIMESERIES_USAGE,
- USAGEATTRIBUTIONSORT_LAMBDA_FUNCTIONS_USAGE,
- USAGEATTRIBUTIONSORT_LAMBDA_FUNCTIONS_PERCENTAGE,
- USAGEATTRIBUTIONSORT_LAMBDA_INVOCATIONS_USAGE,
- USAGEATTRIBUTIONSORT_LAMBDA_INVOCATIONS_PERCENTAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_INDEXED_LOGS_USAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_INDEXED_LOGS_PERCENTAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_INGESTED_LOGS_USAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_INGESTED_LOGS_PERCENTAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_INDEXED_SPANS_USAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_INDEXED_SPANS_PERCENTAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_INGESTED_SPANS_USAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_INGESTED_SPANS_PERCENTAGE,
- USAGEATTRIBUTIONSORT_APM_FARGATE_USAGE,
- USAGEATTRIBUTIONSORT_APM_FARGATE_PERCENTAGE,
- USAGEATTRIBUTIONSORT_APPSEC_FARGATE_USAGE,
- USAGEATTRIBUTIONSORT_APPSEC_FARGATE_PERCENTAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_RUM_USAGE_ATTRIBUTION_USAGE,
- USAGEATTRIBUTIONSORT_ESTIMATED_RUM_USAGE_ATTRIBUTION_PERCENTAGE,
- USAGEATTRIBUTIONSORT_ASM_SERVERLESS_TRACED_INVOCATIONS_USAGE,
- USAGEATTRIBUTIONSORT_ASM_SERVERLESS_TRACED_INVOCATIONS_PERCENTAGE,
-}
-
-// GetAllowedValues returns the list of possible values.
-func (v *UsageAttributionSort) GetAllowedValues() []UsageAttributionSort {
- return allowedUsageAttributionSortEnumValues
-}
-
-// UnmarshalJSON deserializes the given payload.
-func (v *UsageAttributionSort) UnmarshalJSON(src []byte) error {
- var value string
- err := datadog.Unmarshal(src, &value)
- if err != nil {
- return err
- }
- *v = UsageAttributionSort(value)
- return nil
-}
-
-// NewUsageAttributionSortFromValue returns a pointer to a valid UsageAttributionSort
-// for the value passed as argument, or an error if the value passed is not allowed by the enum.
-func NewUsageAttributionSortFromValue(v string) (*UsageAttributionSort, error) {
- ev := UsageAttributionSort(v)
- if ev.IsValid() {
- return &ev, nil
- }
- return nil, fmt.Errorf("invalid value '%v' for UsageAttributionSort: valid values are %v", v, allowedUsageAttributionSortEnumValues)
-}
-
-// IsValid returns true if the value is valid for the enum, false otherwise.
-func (v UsageAttributionSort) IsValid() bool {
- for _, existing := range allowedUsageAttributionSortEnumValues {
- if existing == v {
- return true
- }
- }
- return false
-}
-
-// Ptr returns reference to UsageAttributionSort value.
-func (v UsageAttributionSort) Ptr() *UsageAttributionSort {
- return &v
-}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_supported_metrics.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_supported_metrics.go
deleted file mode 100644
index 60bfc318d1..0000000000
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_supported_metrics.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2019-Present Datadog, Inc.
-
-package datadogV1
-
-import (
- "fmt"
-
- "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
-)
-
-// UsageAttributionSupportedMetrics Supported fields for usage attribution requests (valid requests contain one or more metrics, or `*` for all).
-type UsageAttributionSupportedMetrics string
-
-// List of UsageAttributionSupportedMetrics.
-const (
- USAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_TIMESERIES_USAGE UsageAttributionSupportedMetrics = "custom_timeseries_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_USAGE UsageAttributionSupportedMetrics = "container_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_SNMP_PERCENTAGE UsageAttributionSupportedMetrics = "snmp_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APM_HOST_USAGE UsageAttributionSupportedMetrics = "apm_host_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_BROWSER_USAGE UsageAttributionSupportedMetrics = "browser_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_NPM_HOST_PERCENTAGE UsageAttributionSupportedMetrics = "npm_host_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_USAGE UsageAttributionSupportedMetrics = "infra_host_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_TIMESERIES_PERCENTAGE UsageAttributionSupportedMetrics = "custom_timeseries_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_PERCENTAGE UsageAttributionSupportedMetrics = "container_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_API_USAGE UsageAttributionSupportedMetrics = "api_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APM_HOST_PERCENTAGE UsageAttributionSupportedMetrics = "apm_host_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_PERCENTAGE UsageAttributionSupportedMetrics = "infra_host_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_SNMP_USAGE UsageAttributionSupportedMetrics = "snmp_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_BROWSER_PERCENTAGE UsageAttributionSupportedMetrics = "browser_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_API_PERCENTAGE UsageAttributionSupportedMetrics = "api_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_NPM_HOST_USAGE UsageAttributionSupportedMetrics = "npm_host_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_FUNCTIONS_USAGE UsageAttributionSupportedMetrics = "lambda_functions_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_FUNCTIONS_PERCENTAGE UsageAttributionSupportedMetrics = "lambda_functions_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_INVOCATIONS_USAGE UsageAttributionSupportedMetrics = "lambda_invocations_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_INVOCATIONS_PERCENTAGE UsageAttributionSupportedMetrics = "lambda_invocations_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_FARGATE_USAGE UsageAttributionSupportedMetrics = "fargate_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_FARGATE_PERCENTAGE UsageAttributionSupportedMetrics = "fargate_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_HOST_USAGE UsageAttributionSupportedMetrics = "profiled_host_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_HOST_PERCENTAGE UsageAttributionSupportedMetrics = "profiled_host_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_USAGE UsageAttributionSupportedMetrics = "profiled_container_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_PERCENTAGE UsageAttributionSupportedMetrics = "profiled_container_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_HOSTS_USAGE UsageAttributionSupportedMetrics = "dbm_hosts_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_HOSTS_PERCENTAGE UsageAttributionSupportedMetrics = "dbm_hosts_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_QUERIES_USAGE UsageAttributionSupportedMetrics = "dbm_queries_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_QUERIES_PERCENTAGE UsageAttributionSupportedMetrics = "dbm_queries_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_LOGS_USAGE UsageAttributionSupportedMetrics = "estimated_indexed_logs_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_LOGS_PERCENTAGE UsageAttributionSupportedMetrics = "estimated_indexed_logs_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_LOGS_USAGE UsageAttributionSupportedMetrics = "estimated_ingested_logs_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_LOGS_PERCENTAGE UsageAttributionSupportedMetrics = "estimated_ingested_logs_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_USAGE UsageAttributionSupportedMetrics = "appsec_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_PERCENTAGE UsageAttributionSupportedMetrics = "appsec_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_SPANS_USAGE UsageAttributionSupportedMetrics = "estimated_indexed_spans_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_SPANS_PERCENTAGE UsageAttributionSupportedMetrics = "estimated_indexed_spans_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_SPANS_USAGE UsageAttributionSupportedMetrics = "estimated_ingested_spans_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_SPANS_PERCENTAGE UsageAttributionSupportedMetrics = "estimated_ingested_spans_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APM_FARGATE_USAGE UsageAttributionSupportedMetrics = "apm_fargate_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APM_FARGATE_PERCENTAGE UsageAttributionSupportedMetrics = "apm_fargate_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_FARGATE_USAGE UsageAttributionSupportedMetrics = "appsec_fargate_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_FARGATE_PERCENTAGE UsageAttributionSupportedMetrics = "appsec_fargate_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_RUM_USAGE_ATTRIBUTION_USAGE UsageAttributionSupportedMetrics = "estimated_rum_usage_attribution_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_RUM_USAGE_ATTRIBUTION_PERCENTAGE UsageAttributionSupportedMetrics = "estimated_rum_usage_attribution_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ASM_SERVERLESS_TRACED_INVOCATIONS_USAGE UsageAttributionSupportedMetrics = "asm_serverless_traced_invocations_usage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ASM_SERVERLESS_TRACED_INVOCATIONS_PERCENTAGE UsageAttributionSupportedMetrics = "asm_serverless_traced_invocations_percentage"
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ALL UsageAttributionSupportedMetrics = "*"
-)
-
-var allowedUsageAttributionSupportedMetricsEnumValues = []UsageAttributionSupportedMetrics{
- USAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_TIMESERIES_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_SNMP_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APM_HOST_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_BROWSER_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_NPM_HOST_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_CUSTOM_TIMESERIES_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_CONTAINER_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_API_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APM_HOST_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_INFRA_HOST_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_SNMP_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_BROWSER_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_API_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_NPM_HOST_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_FUNCTIONS_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_FUNCTIONS_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_INVOCATIONS_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_LAMBDA_INVOCATIONS_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_FARGATE_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_FARGATE_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_HOST_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_HOST_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_PROFILED_CONTAINER_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_HOSTS_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_HOSTS_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_QUERIES_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_DBM_QUERIES_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_LOGS_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_LOGS_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_LOGS_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_LOGS_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_SPANS_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INDEXED_SPANS_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_SPANS_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_INGESTED_SPANS_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APM_FARGATE_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APM_FARGATE_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_FARGATE_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_APPSEC_FARGATE_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_RUM_USAGE_ATTRIBUTION_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ESTIMATED_RUM_USAGE_ATTRIBUTION_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ASM_SERVERLESS_TRACED_INVOCATIONS_USAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ASM_SERVERLESS_TRACED_INVOCATIONS_PERCENTAGE,
- USAGEATTRIBUTIONSUPPORTEDMETRICS_ALL,
-}
-
-// GetAllowedValues returns the list of possible values.
-func (v *UsageAttributionSupportedMetrics) GetAllowedValues() []UsageAttributionSupportedMetrics {
- return allowedUsageAttributionSupportedMetricsEnumValues
-}
-
-// UnmarshalJSON deserializes the given payload.
-func (v *UsageAttributionSupportedMetrics) UnmarshalJSON(src []byte) error {
- var value string
- err := datadog.Unmarshal(src, &value)
- if err != nil {
- return err
- }
- *v = UsageAttributionSupportedMetrics(value)
- return nil
-}
-
-// NewUsageAttributionSupportedMetricsFromValue returns a pointer to a valid UsageAttributionSupportedMetrics
-// for the value passed as argument, or an error if the value passed is not allowed by the enum.
-func NewUsageAttributionSupportedMetricsFromValue(v string) (*UsageAttributionSupportedMetrics, error) {
- ev := UsageAttributionSupportedMetrics(v)
- if ev.IsValid() {
- return &ev, nil
- }
- return nil, fmt.Errorf("invalid value '%v' for UsageAttributionSupportedMetrics: valid values are %v", v, allowedUsageAttributionSupportedMetricsEnumValues)
-}
-
-// IsValid returns true if the value is valid for the enum, false otherwise.
-func (v UsageAttributionSupportedMetrics) IsValid() bool {
- for _, existing := range allowedUsageAttributionSupportedMetricsEnumValues {
- if existing == v {
- return true
- }
- }
- return false
-}
-
-// Ptr returns reference to UsageAttributionSupportedMetrics value.
-func (v UsageAttributionSupportedMetrics) Ptr() *UsageAttributionSupportedMetrics {
- return &v
-}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_values.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_values.go
deleted file mode 100644
index 2fc9589725..0000000000
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_attribution_values.go
+++ /dev/null
@@ -1,1887 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2019-Present Datadog, Inc.
-
-package datadogV1
-
-import (
- "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
-)
-
-// UsageAttributionValues Fields in Usage Summary by tag(s).
-type UsageAttributionValues struct {
- // The percentage of synthetic API test usage by tag(s).
- ApiPercentage *float64 `json:"api_percentage,omitempty"`
- // The synthetic API test usage by tag(s).
- ApiUsage *float64 `json:"api_usage,omitempty"`
- // The percentage of APM ECS Fargate task usage by tag(s).
- ApmFargatePercentage *float64 `json:"apm_fargate_percentage,omitempty"`
- // The APM ECS Fargate task usage by tag(s).
- ApmFargateUsage *float64 `json:"apm_fargate_usage,omitempty"`
- // The percentage of APM host usage by tag(s).
- ApmHostPercentage *float64 `json:"apm_host_percentage,omitempty"`
- // The APM host usage by tag(s).
- ApmHostUsage *float64 `json:"apm_host_usage,omitempty"`
- // The percentage of Application Security Monitoring ECS Fargate task usage by tag(s).
- AppsecFargatePercentage *float64 `json:"appsec_fargate_percentage,omitempty"`
- // The Application Security Monitoring ECS Fargate task usage by tag(s).
- AppsecFargateUsage *float64 `json:"appsec_fargate_usage,omitempty"`
- // The percentage of Application Security Monitoring host usage by tag(s).
- AppsecPercentage *float64 `json:"appsec_percentage,omitempty"`
- // The Application Security Monitoring host usage by tag(s).
- AppsecUsage *float64 `json:"appsec_usage,omitempty"`
- // The percentage of synthetic browser test usage by tag(s).
- BrowserPercentage *float64 `json:"browser_percentage,omitempty"`
- // The synthetic browser test usage by tag(s).
- BrowserUsage *float64 `json:"browser_usage,omitempty"`
- // The percentage of container usage by tag(s).
- ContainerPercentage *float64 `json:"container_percentage,omitempty"`
- // The container usage by tag(s).
- ContainerUsage *float64 `json:"container_usage,omitempty"`
- // The percentage of Cloud Security Management Pro container usage by tag(s)
- CspmContainerPercentage *float64 `json:"cspm_container_percentage,omitempty"`
- // The Cloud Security Management Pro container usage by tag(s)
- CspmContainerUsage *float64 `json:"cspm_container_usage,omitempty"`
- // The percentage of Cloud Security Management Pro host usage by tag(s)
- CspmHostPercentage *float64 `json:"cspm_host_percentage,omitempty"`
- // The Cloud Security Management Pro host usage by tag(s)
- CspmHostUsage *float64 `json:"cspm_host_usage,omitempty"`
- // The percentage of custom metrics usage by tag(s).
- CustomTimeseriesPercentage *float64 `json:"custom_timeseries_percentage,omitempty"`
- // The custom metrics usage by tag(s).
- CustomTimeseriesUsage *float64 `json:"custom_timeseries_usage,omitempty"`
- // The percentage of Cloud Workload Security container usage by tag(s)
- CwsContainerPercentage *float64 `json:"cws_container_percentage,omitempty"`
- // The Cloud Workload Security container usage by tag(s)
- CwsContainerUsage *float64 `json:"cws_container_usage,omitempty"`
- // The percentage of Cloud Workload Security host usage by tag(s)
- CwsHostPercentage *float64 `json:"cws_host_percentage,omitempty"`
- // The Cloud Workload Security host usage by tag(s)
- CwsHostUsage *float64 `json:"cws_host_usage,omitempty"`
- // The percentage of Database Monitoring host usage by tag(s).
- DbmHostsPercentage *float64 `json:"dbm_hosts_percentage,omitempty"`
- // The Database Monitoring host usage by tag(s).
- DbmHostsUsage *float64 `json:"dbm_hosts_usage,omitempty"`
- // The percentage of Database Monitoring normalized queries usage by tag(s).
- DbmQueriesPercentage *float64 `json:"dbm_queries_percentage,omitempty"`
- // The Database Monitoring normalized queries usage by tag(s).
- DbmQueriesUsage *float64 `json:"dbm_queries_usage,omitempty"`
- // The percentage of estimated live indexed logs usage by tag(s).
- EstimatedIndexedLogsPercentage *float64 `json:"estimated_indexed_logs_percentage,omitempty"`
- // The estimated live indexed logs usage by tag(s).
- EstimatedIndexedLogsUsage *float64 `json:"estimated_indexed_logs_usage,omitempty"`
- // The percentage of estimated indexed spans usage by tag(s).
- EstimatedIndexedSpansPercentage *float64 `json:"estimated_indexed_spans_percentage,omitempty"`
- // The estimated indexed spans usage by tag(s).
- EstimatedIndexedSpansUsage *float64 `json:"estimated_indexed_spans_usage,omitempty"`
- // The percentage of estimated live ingested logs usage by tag(s).
- EstimatedIngestedLogsPercentage *float64 `json:"estimated_ingested_logs_percentage,omitempty"`
- // The estimated live ingested logs usage by tag(s).
- EstimatedIngestedLogsUsage *float64 `json:"estimated_ingested_logs_usage,omitempty"`
- // The percentage of estimated ingested spans usage by tag(s).
- EstimatedIngestedSpansPercentage *float64 `json:"estimated_ingested_spans_percentage,omitempty"`
- // The estimated ingested spans usage by tag(s).
- EstimatedIngestedSpansUsage *float64 `json:"estimated_ingested_spans_usage,omitempty"`
- // The percentage of estimated rum sessions usage by tag(s).
- EstimatedRumSessionsPercentage *float64 `json:"estimated_rum_sessions_percentage,omitempty"`
- // The estimated rum sessions usage by tag(s).
- EstimatedRumSessionsUsage *float64 `json:"estimated_rum_sessions_usage,omitempty"`
- // The percentage of infrastructure host usage by tag(s).
- InfraHostPercentage *float64 `json:"infra_host_percentage,omitempty"`
- // The infrastructure host usage by tag(s).
- InfraHostUsage *float64 `json:"infra_host_usage,omitempty"`
- // The percentage of Lambda function usage by tag(s).
- LambdaFunctionsPercentage *float64 `json:"lambda_functions_percentage,omitempty"`
- // The Lambda function usage by tag(s).
- LambdaFunctionsUsage *float64 `json:"lambda_functions_usage,omitempty"`
- // The percentage of Lambda invocation usage by tag(s).
- LambdaInvocationsPercentage *float64 `json:"lambda_invocations_percentage,omitempty"`
- // The Lambda invocation usage by tag(s).
- LambdaInvocationsUsage *float64 `json:"lambda_invocations_usage,omitempty"`
- // The percentage of network host usage by tag(s).
- NpmHostPercentage *float64 `json:"npm_host_percentage,omitempty"`
- // The network host usage by tag(s).
- NpmHostUsage *float64 `json:"npm_host_usage,omitempty"`
- // The percentage of profiled containers usage by tag(s).
- ProfiledContainerPercentage *float64 `json:"profiled_container_percentage,omitempty"`
- // The profiled container usage by tag(s).
- ProfiledContainerUsage *float64 `json:"profiled_container_usage,omitempty"`
- // The percentage of profiled hosts usage by tag(s).
- ProfiledHostsPercentage *float64 `json:"profiled_hosts_percentage,omitempty"`
- // The profiled host usage by tag(s).
- ProfiledHostsUsage *float64 `json:"profiled_hosts_usage,omitempty"`
- // The percentage of network device usage by tag(s).
- SnmpPercentage *float64 `json:"snmp_percentage,omitempty"`
- // The network device usage by tag(s).
- SnmpUsage *float64 `json:"snmp_usage,omitempty"`
- // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
- UnparsedObject map[string]interface{} `json:"-"`
- AdditionalProperties map[string]interface{}
-}
-
-// NewUsageAttributionValues instantiates a new UsageAttributionValues object.
-// This constructor will assign default values to properties that have it defined,
-// and makes sure properties required by API are set, but the set of arguments
-// will change when the set of required properties is changed.
-func NewUsageAttributionValues() *UsageAttributionValues {
- this := UsageAttributionValues{}
- return &this
-}
-
-// NewUsageAttributionValuesWithDefaults instantiates a new UsageAttributionValues object.
-// This constructor will only assign default values to properties that have it defined,
-// but it doesn't guarantee that properties required by API are set.
-func NewUsageAttributionValuesWithDefaults() *UsageAttributionValues {
- this := UsageAttributionValues{}
- return &this
-}
-
-// GetApiPercentage returns the ApiPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetApiPercentage() float64 {
- if o == nil || o.ApiPercentage == nil {
- var ret float64
- return ret
- }
- return *o.ApiPercentage
-}
-
-// GetApiPercentageOk returns a tuple with the ApiPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetApiPercentageOk() (*float64, bool) {
- if o == nil || o.ApiPercentage == nil {
- return nil, false
- }
- return o.ApiPercentage, true
-}
-
-// HasApiPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasApiPercentage() bool {
- return o != nil && o.ApiPercentage != nil
-}
-
-// SetApiPercentage gets a reference to the given float64 and assigns it to the ApiPercentage field.
-func (o *UsageAttributionValues) SetApiPercentage(v float64) {
- o.ApiPercentage = &v
-}
-
-// GetApiUsage returns the ApiUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetApiUsage() float64 {
- if o == nil || o.ApiUsage == nil {
- var ret float64
- return ret
- }
- return *o.ApiUsage
-}
-
-// GetApiUsageOk returns a tuple with the ApiUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetApiUsageOk() (*float64, bool) {
- if o == nil || o.ApiUsage == nil {
- return nil, false
- }
- return o.ApiUsage, true
-}
-
-// HasApiUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasApiUsage() bool {
- return o != nil && o.ApiUsage != nil
-}
-
-// SetApiUsage gets a reference to the given float64 and assigns it to the ApiUsage field.
-func (o *UsageAttributionValues) SetApiUsage(v float64) {
- o.ApiUsage = &v
-}
-
-// GetApmFargatePercentage returns the ApmFargatePercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetApmFargatePercentage() float64 {
- if o == nil || o.ApmFargatePercentage == nil {
- var ret float64
- return ret
- }
- return *o.ApmFargatePercentage
-}
-
-// GetApmFargatePercentageOk returns a tuple with the ApmFargatePercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetApmFargatePercentageOk() (*float64, bool) {
- if o == nil || o.ApmFargatePercentage == nil {
- return nil, false
- }
- return o.ApmFargatePercentage, true
-}
-
-// HasApmFargatePercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasApmFargatePercentage() bool {
- return o != nil && o.ApmFargatePercentage != nil
-}
-
-// SetApmFargatePercentage gets a reference to the given float64 and assigns it to the ApmFargatePercentage field.
-func (o *UsageAttributionValues) SetApmFargatePercentage(v float64) {
- o.ApmFargatePercentage = &v
-}
-
-// GetApmFargateUsage returns the ApmFargateUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetApmFargateUsage() float64 {
- if o == nil || o.ApmFargateUsage == nil {
- var ret float64
- return ret
- }
- return *o.ApmFargateUsage
-}
-
-// GetApmFargateUsageOk returns a tuple with the ApmFargateUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetApmFargateUsageOk() (*float64, bool) {
- if o == nil || o.ApmFargateUsage == nil {
- return nil, false
- }
- return o.ApmFargateUsage, true
-}
-
-// HasApmFargateUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasApmFargateUsage() bool {
- return o != nil && o.ApmFargateUsage != nil
-}
-
-// SetApmFargateUsage gets a reference to the given float64 and assigns it to the ApmFargateUsage field.
-func (o *UsageAttributionValues) SetApmFargateUsage(v float64) {
- o.ApmFargateUsage = &v
-}
-
-// GetApmHostPercentage returns the ApmHostPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetApmHostPercentage() float64 {
- if o == nil || o.ApmHostPercentage == nil {
- var ret float64
- return ret
- }
- return *o.ApmHostPercentage
-}
-
-// GetApmHostPercentageOk returns a tuple with the ApmHostPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetApmHostPercentageOk() (*float64, bool) {
- if o == nil || o.ApmHostPercentage == nil {
- return nil, false
- }
- return o.ApmHostPercentage, true
-}
-
-// HasApmHostPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasApmHostPercentage() bool {
- return o != nil && o.ApmHostPercentage != nil
-}
-
-// SetApmHostPercentage gets a reference to the given float64 and assigns it to the ApmHostPercentage field.
-func (o *UsageAttributionValues) SetApmHostPercentage(v float64) {
- o.ApmHostPercentage = &v
-}
-
-// GetApmHostUsage returns the ApmHostUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetApmHostUsage() float64 {
- if o == nil || o.ApmHostUsage == nil {
- var ret float64
- return ret
- }
- return *o.ApmHostUsage
-}
-
-// GetApmHostUsageOk returns a tuple with the ApmHostUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetApmHostUsageOk() (*float64, bool) {
- if o == nil || o.ApmHostUsage == nil {
- return nil, false
- }
- return o.ApmHostUsage, true
-}
-
-// HasApmHostUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasApmHostUsage() bool {
- return o != nil && o.ApmHostUsage != nil
-}
-
-// SetApmHostUsage gets a reference to the given float64 and assigns it to the ApmHostUsage field.
-func (o *UsageAttributionValues) SetApmHostUsage(v float64) {
- o.ApmHostUsage = &v
-}
-
-// GetAppsecFargatePercentage returns the AppsecFargatePercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetAppsecFargatePercentage() float64 {
- if o == nil || o.AppsecFargatePercentage == nil {
- var ret float64
- return ret
- }
- return *o.AppsecFargatePercentage
-}
-
-// GetAppsecFargatePercentageOk returns a tuple with the AppsecFargatePercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetAppsecFargatePercentageOk() (*float64, bool) {
- if o == nil || o.AppsecFargatePercentage == nil {
- return nil, false
- }
- return o.AppsecFargatePercentage, true
-}
-
-// HasAppsecFargatePercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasAppsecFargatePercentage() bool {
- return o != nil && o.AppsecFargatePercentage != nil
-}
-
-// SetAppsecFargatePercentage gets a reference to the given float64 and assigns it to the AppsecFargatePercentage field.
-func (o *UsageAttributionValues) SetAppsecFargatePercentage(v float64) {
- o.AppsecFargatePercentage = &v
-}
-
-// GetAppsecFargateUsage returns the AppsecFargateUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetAppsecFargateUsage() float64 {
- if o == nil || o.AppsecFargateUsage == nil {
- var ret float64
- return ret
- }
- return *o.AppsecFargateUsage
-}
-
-// GetAppsecFargateUsageOk returns a tuple with the AppsecFargateUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetAppsecFargateUsageOk() (*float64, bool) {
- if o == nil || o.AppsecFargateUsage == nil {
- return nil, false
- }
- return o.AppsecFargateUsage, true
-}
-
-// HasAppsecFargateUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasAppsecFargateUsage() bool {
- return o != nil && o.AppsecFargateUsage != nil
-}
-
-// SetAppsecFargateUsage gets a reference to the given float64 and assigns it to the AppsecFargateUsage field.
-func (o *UsageAttributionValues) SetAppsecFargateUsage(v float64) {
- o.AppsecFargateUsage = &v
-}
-
-// GetAppsecPercentage returns the AppsecPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetAppsecPercentage() float64 {
- if o == nil || o.AppsecPercentage == nil {
- var ret float64
- return ret
- }
- return *o.AppsecPercentage
-}
-
-// GetAppsecPercentageOk returns a tuple with the AppsecPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetAppsecPercentageOk() (*float64, bool) {
- if o == nil || o.AppsecPercentage == nil {
- return nil, false
- }
- return o.AppsecPercentage, true
-}
-
-// HasAppsecPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasAppsecPercentage() bool {
- return o != nil && o.AppsecPercentage != nil
-}
-
-// SetAppsecPercentage gets a reference to the given float64 and assigns it to the AppsecPercentage field.
-func (o *UsageAttributionValues) SetAppsecPercentage(v float64) {
- o.AppsecPercentage = &v
-}
-
-// GetAppsecUsage returns the AppsecUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetAppsecUsage() float64 {
- if o == nil || o.AppsecUsage == nil {
- var ret float64
- return ret
- }
- return *o.AppsecUsage
-}
-
-// GetAppsecUsageOk returns a tuple with the AppsecUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetAppsecUsageOk() (*float64, bool) {
- if o == nil || o.AppsecUsage == nil {
- return nil, false
- }
- return o.AppsecUsage, true
-}
-
-// HasAppsecUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasAppsecUsage() bool {
- return o != nil && o.AppsecUsage != nil
-}
-
-// SetAppsecUsage gets a reference to the given float64 and assigns it to the AppsecUsage field.
-func (o *UsageAttributionValues) SetAppsecUsage(v float64) {
- o.AppsecUsage = &v
-}
-
-// GetBrowserPercentage returns the BrowserPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetBrowserPercentage() float64 {
- if o == nil || o.BrowserPercentage == nil {
- var ret float64
- return ret
- }
- return *o.BrowserPercentage
-}
-
-// GetBrowserPercentageOk returns a tuple with the BrowserPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetBrowserPercentageOk() (*float64, bool) {
- if o == nil || o.BrowserPercentage == nil {
- return nil, false
- }
- return o.BrowserPercentage, true
-}
-
-// HasBrowserPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasBrowserPercentage() bool {
- return o != nil && o.BrowserPercentage != nil
-}
-
-// SetBrowserPercentage gets a reference to the given float64 and assigns it to the BrowserPercentage field.
-func (o *UsageAttributionValues) SetBrowserPercentage(v float64) {
- o.BrowserPercentage = &v
-}
-
-// GetBrowserUsage returns the BrowserUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetBrowserUsage() float64 {
- if o == nil || o.BrowserUsage == nil {
- var ret float64
- return ret
- }
- return *o.BrowserUsage
-}
-
-// GetBrowserUsageOk returns a tuple with the BrowserUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetBrowserUsageOk() (*float64, bool) {
- if o == nil || o.BrowserUsage == nil {
- return nil, false
- }
- return o.BrowserUsage, true
-}
-
-// HasBrowserUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasBrowserUsage() bool {
- return o != nil && o.BrowserUsage != nil
-}
-
-// SetBrowserUsage gets a reference to the given float64 and assigns it to the BrowserUsage field.
-func (o *UsageAttributionValues) SetBrowserUsage(v float64) {
- o.BrowserUsage = &v
-}
-
-// GetContainerPercentage returns the ContainerPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetContainerPercentage() float64 {
- if o == nil || o.ContainerPercentage == nil {
- var ret float64
- return ret
- }
- return *o.ContainerPercentage
-}
-
-// GetContainerPercentageOk returns a tuple with the ContainerPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetContainerPercentageOk() (*float64, bool) {
- if o == nil || o.ContainerPercentage == nil {
- return nil, false
- }
- return o.ContainerPercentage, true
-}
-
-// HasContainerPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasContainerPercentage() bool {
- return o != nil && o.ContainerPercentage != nil
-}
-
-// SetContainerPercentage gets a reference to the given float64 and assigns it to the ContainerPercentage field.
-func (o *UsageAttributionValues) SetContainerPercentage(v float64) {
- o.ContainerPercentage = &v
-}
-
-// GetContainerUsage returns the ContainerUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetContainerUsage() float64 {
- if o == nil || o.ContainerUsage == nil {
- var ret float64
- return ret
- }
- return *o.ContainerUsage
-}
-
-// GetContainerUsageOk returns a tuple with the ContainerUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetContainerUsageOk() (*float64, bool) {
- if o == nil || o.ContainerUsage == nil {
- return nil, false
- }
- return o.ContainerUsage, true
-}
-
-// HasContainerUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasContainerUsage() bool {
- return o != nil && o.ContainerUsage != nil
-}
-
-// SetContainerUsage gets a reference to the given float64 and assigns it to the ContainerUsage field.
-func (o *UsageAttributionValues) SetContainerUsage(v float64) {
- o.ContainerUsage = &v
-}
-
-// GetCspmContainerPercentage returns the CspmContainerPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCspmContainerPercentage() float64 {
- if o == nil || o.CspmContainerPercentage == nil {
- var ret float64
- return ret
- }
- return *o.CspmContainerPercentage
-}
-
-// GetCspmContainerPercentageOk returns a tuple with the CspmContainerPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCspmContainerPercentageOk() (*float64, bool) {
- if o == nil || o.CspmContainerPercentage == nil {
- return nil, false
- }
- return o.CspmContainerPercentage, true
-}
-
-// HasCspmContainerPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCspmContainerPercentage() bool {
- return o != nil && o.CspmContainerPercentage != nil
-}
-
-// SetCspmContainerPercentage gets a reference to the given float64 and assigns it to the CspmContainerPercentage field.
-func (o *UsageAttributionValues) SetCspmContainerPercentage(v float64) {
- o.CspmContainerPercentage = &v
-}
-
-// GetCspmContainerUsage returns the CspmContainerUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCspmContainerUsage() float64 {
- if o == nil || o.CspmContainerUsage == nil {
- var ret float64
- return ret
- }
- return *o.CspmContainerUsage
-}
-
-// GetCspmContainerUsageOk returns a tuple with the CspmContainerUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCspmContainerUsageOk() (*float64, bool) {
- if o == nil || o.CspmContainerUsage == nil {
- return nil, false
- }
- return o.CspmContainerUsage, true
-}
-
-// HasCspmContainerUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCspmContainerUsage() bool {
- return o != nil && o.CspmContainerUsage != nil
-}
-
-// SetCspmContainerUsage gets a reference to the given float64 and assigns it to the CspmContainerUsage field.
-func (o *UsageAttributionValues) SetCspmContainerUsage(v float64) {
- o.CspmContainerUsage = &v
-}
-
-// GetCspmHostPercentage returns the CspmHostPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCspmHostPercentage() float64 {
- if o == nil || o.CspmHostPercentage == nil {
- var ret float64
- return ret
- }
- return *o.CspmHostPercentage
-}
-
-// GetCspmHostPercentageOk returns a tuple with the CspmHostPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCspmHostPercentageOk() (*float64, bool) {
- if o == nil || o.CspmHostPercentage == nil {
- return nil, false
- }
- return o.CspmHostPercentage, true
-}
-
-// HasCspmHostPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCspmHostPercentage() bool {
- return o != nil && o.CspmHostPercentage != nil
-}
-
-// SetCspmHostPercentage gets a reference to the given float64 and assigns it to the CspmHostPercentage field.
-func (o *UsageAttributionValues) SetCspmHostPercentage(v float64) {
- o.CspmHostPercentage = &v
-}
-
-// GetCspmHostUsage returns the CspmHostUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCspmHostUsage() float64 {
- if o == nil || o.CspmHostUsage == nil {
- var ret float64
- return ret
- }
- return *o.CspmHostUsage
-}
-
-// GetCspmHostUsageOk returns a tuple with the CspmHostUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCspmHostUsageOk() (*float64, bool) {
- if o == nil || o.CspmHostUsage == nil {
- return nil, false
- }
- return o.CspmHostUsage, true
-}
-
-// HasCspmHostUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCspmHostUsage() bool {
- return o != nil && o.CspmHostUsage != nil
-}
-
-// SetCspmHostUsage gets a reference to the given float64 and assigns it to the CspmHostUsage field.
-func (o *UsageAttributionValues) SetCspmHostUsage(v float64) {
- o.CspmHostUsage = &v
-}
-
-// GetCustomTimeseriesPercentage returns the CustomTimeseriesPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCustomTimeseriesPercentage() float64 {
- if o == nil || o.CustomTimeseriesPercentage == nil {
- var ret float64
- return ret
- }
- return *o.CustomTimeseriesPercentage
-}
-
-// GetCustomTimeseriesPercentageOk returns a tuple with the CustomTimeseriesPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCustomTimeseriesPercentageOk() (*float64, bool) {
- if o == nil || o.CustomTimeseriesPercentage == nil {
- return nil, false
- }
- return o.CustomTimeseriesPercentage, true
-}
-
-// HasCustomTimeseriesPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCustomTimeseriesPercentage() bool {
- return o != nil && o.CustomTimeseriesPercentage != nil
-}
-
-// SetCustomTimeseriesPercentage gets a reference to the given float64 and assigns it to the CustomTimeseriesPercentage field.
-func (o *UsageAttributionValues) SetCustomTimeseriesPercentage(v float64) {
- o.CustomTimeseriesPercentage = &v
-}
-
-// GetCustomTimeseriesUsage returns the CustomTimeseriesUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCustomTimeseriesUsage() float64 {
- if o == nil || o.CustomTimeseriesUsage == nil {
- var ret float64
- return ret
- }
- return *o.CustomTimeseriesUsage
-}
-
-// GetCustomTimeseriesUsageOk returns a tuple with the CustomTimeseriesUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCustomTimeseriesUsageOk() (*float64, bool) {
- if o == nil || o.CustomTimeseriesUsage == nil {
- return nil, false
- }
- return o.CustomTimeseriesUsage, true
-}
-
-// HasCustomTimeseriesUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCustomTimeseriesUsage() bool {
- return o != nil && o.CustomTimeseriesUsage != nil
-}
-
-// SetCustomTimeseriesUsage gets a reference to the given float64 and assigns it to the CustomTimeseriesUsage field.
-func (o *UsageAttributionValues) SetCustomTimeseriesUsage(v float64) {
- o.CustomTimeseriesUsage = &v
-}
-
-// GetCwsContainerPercentage returns the CwsContainerPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCwsContainerPercentage() float64 {
- if o == nil || o.CwsContainerPercentage == nil {
- var ret float64
- return ret
- }
- return *o.CwsContainerPercentage
-}
-
-// GetCwsContainerPercentageOk returns a tuple with the CwsContainerPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCwsContainerPercentageOk() (*float64, bool) {
- if o == nil || o.CwsContainerPercentage == nil {
- return nil, false
- }
- return o.CwsContainerPercentage, true
-}
-
-// HasCwsContainerPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCwsContainerPercentage() bool {
- return o != nil && o.CwsContainerPercentage != nil
-}
-
-// SetCwsContainerPercentage gets a reference to the given float64 and assigns it to the CwsContainerPercentage field.
-func (o *UsageAttributionValues) SetCwsContainerPercentage(v float64) {
- o.CwsContainerPercentage = &v
-}
-
-// GetCwsContainerUsage returns the CwsContainerUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCwsContainerUsage() float64 {
- if o == nil || o.CwsContainerUsage == nil {
- var ret float64
- return ret
- }
- return *o.CwsContainerUsage
-}
-
-// GetCwsContainerUsageOk returns a tuple with the CwsContainerUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCwsContainerUsageOk() (*float64, bool) {
- if o == nil || o.CwsContainerUsage == nil {
- return nil, false
- }
- return o.CwsContainerUsage, true
-}
-
-// HasCwsContainerUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCwsContainerUsage() bool {
- return o != nil && o.CwsContainerUsage != nil
-}
-
-// SetCwsContainerUsage gets a reference to the given float64 and assigns it to the CwsContainerUsage field.
-func (o *UsageAttributionValues) SetCwsContainerUsage(v float64) {
- o.CwsContainerUsage = &v
-}
-
-// GetCwsHostPercentage returns the CwsHostPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCwsHostPercentage() float64 {
- if o == nil || o.CwsHostPercentage == nil {
- var ret float64
- return ret
- }
- return *o.CwsHostPercentage
-}
-
-// GetCwsHostPercentageOk returns a tuple with the CwsHostPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCwsHostPercentageOk() (*float64, bool) {
- if o == nil || o.CwsHostPercentage == nil {
- return nil, false
- }
- return o.CwsHostPercentage, true
-}
-
-// HasCwsHostPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCwsHostPercentage() bool {
- return o != nil && o.CwsHostPercentage != nil
-}
-
-// SetCwsHostPercentage gets a reference to the given float64 and assigns it to the CwsHostPercentage field.
-func (o *UsageAttributionValues) SetCwsHostPercentage(v float64) {
- o.CwsHostPercentage = &v
-}
-
-// GetCwsHostUsage returns the CwsHostUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetCwsHostUsage() float64 {
- if o == nil || o.CwsHostUsage == nil {
- var ret float64
- return ret
- }
- return *o.CwsHostUsage
-}
-
-// GetCwsHostUsageOk returns a tuple with the CwsHostUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetCwsHostUsageOk() (*float64, bool) {
- if o == nil || o.CwsHostUsage == nil {
- return nil, false
- }
- return o.CwsHostUsage, true
-}
-
-// HasCwsHostUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasCwsHostUsage() bool {
- return o != nil && o.CwsHostUsage != nil
-}
-
-// SetCwsHostUsage gets a reference to the given float64 and assigns it to the CwsHostUsage field.
-func (o *UsageAttributionValues) SetCwsHostUsage(v float64) {
- o.CwsHostUsage = &v
-}
-
-// GetDbmHostsPercentage returns the DbmHostsPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetDbmHostsPercentage() float64 {
- if o == nil || o.DbmHostsPercentage == nil {
- var ret float64
- return ret
- }
- return *o.DbmHostsPercentage
-}
-
-// GetDbmHostsPercentageOk returns a tuple with the DbmHostsPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetDbmHostsPercentageOk() (*float64, bool) {
- if o == nil || o.DbmHostsPercentage == nil {
- return nil, false
- }
- return o.DbmHostsPercentage, true
-}
-
-// HasDbmHostsPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasDbmHostsPercentage() bool {
- return o != nil && o.DbmHostsPercentage != nil
-}
-
-// SetDbmHostsPercentage gets a reference to the given float64 and assigns it to the DbmHostsPercentage field.
-func (o *UsageAttributionValues) SetDbmHostsPercentage(v float64) {
- o.DbmHostsPercentage = &v
-}
-
-// GetDbmHostsUsage returns the DbmHostsUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetDbmHostsUsage() float64 {
- if o == nil || o.DbmHostsUsage == nil {
- var ret float64
- return ret
- }
- return *o.DbmHostsUsage
-}
-
-// GetDbmHostsUsageOk returns a tuple with the DbmHostsUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetDbmHostsUsageOk() (*float64, bool) {
- if o == nil || o.DbmHostsUsage == nil {
- return nil, false
- }
- return o.DbmHostsUsage, true
-}
-
-// HasDbmHostsUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasDbmHostsUsage() bool {
- return o != nil && o.DbmHostsUsage != nil
-}
-
-// SetDbmHostsUsage gets a reference to the given float64 and assigns it to the DbmHostsUsage field.
-func (o *UsageAttributionValues) SetDbmHostsUsage(v float64) {
- o.DbmHostsUsage = &v
-}
-
-// GetDbmQueriesPercentage returns the DbmQueriesPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetDbmQueriesPercentage() float64 {
- if o == nil || o.DbmQueriesPercentage == nil {
- var ret float64
- return ret
- }
- return *o.DbmQueriesPercentage
-}
-
-// GetDbmQueriesPercentageOk returns a tuple with the DbmQueriesPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetDbmQueriesPercentageOk() (*float64, bool) {
- if o == nil || o.DbmQueriesPercentage == nil {
- return nil, false
- }
- return o.DbmQueriesPercentage, true
-}
-
-// HasDbmQueriesPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasDbmQueriesPercentage() bool {
- return o != nil && o.DbmQueriesPercentage != nil
-}
-
-// SetDbmQueriesPercentage gets a reference to the given float64 and assigns it to the DbmQueriesPercentage field.
-func (o *UsageAttributionValues) SetDbmQueriesPercentage(v float64) {
- o.DbmQueriesPercentage = &v
-}
-
-// GetDbmQueriesUsage returns the DbmQueriesUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetDbmQueriesUsage() float64 {
- if o == nil || o.DbmQueriesUsage == nil {
- var ret float64
- return ret
- }
- return *o.DbmQueriesUsage
-}
-
-// GetDbmQueriesUsageOk returns a tuple with the DbmQueriesUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetDbmQueriesUsageOk() (*float64, bool) {
- if o == nil || o.DbmQueriesUsage == nil {
- return nil, false
- }
- return o.DbmQueriesUsage, true
-}
-
-// HasDbmQueriesUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasDbmQueriesUsage() bool {
- return o != nil && o.DbmQueriesUsage != nil
-}
-
-// SetDbmQueriesUsage gets a reference to the given float64 and assigns it to the DbmQueriesUsage field.
-func (o *UsageAttributionValues) SetDbmQueriesUsage(v float64) {
- o.DbmQueriesUsage = &v
-}
-
-// GetEstimatedIndexedLogsPercentage returns the EstimatedIndexedLogsPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedIndexedLogsPercentage() float64 {
- if o == nil || o.EstimatedIndexedLogsPercentage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedIndexedLogsPercentage
-}
-
-// GetEstimatedIndexedLogsPercentageOk returns a tuple with the EstimatedIndexedLogsPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedIndexedLogsPercentageOk() (*float64, bool) {
- if o == nil || o.EstimatedIndexedLogsPercentage == nil {
- return nil, false
- }
- return o.EstimatedIndexedLogsPercentage, true
-}
-
-// HasEstimatedIndexedLogsPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedIndexedLogsPercentage() bool {
- return o != nil && o.EstimatedIndexedLogsPercentage != nil
-}
-
-// SetEstimatedIndexedLogsPercentage gets a reference to the given float64 and assigns it to the EstimatedIndexedLogsPercentage field.
-func (o *UsageAttributionValues) SetEstimatedIndexedLogsPercentage(v float64) {
- o.EstimatedIndexedLogsPercentage = &v
-}
-
-// GetEstimatedIndexedLogsUsage returns the EstimatedIndexedLogsUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedIndexedLogsUsage() float64 {
- if o == nil || o.EstimatedIndexedLogsUsage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedIndexedLogsUsage
-}
-
-// GetEstimatedIndexedLogsUsageOk returns a tuple with the EstimatedIndexedLogsUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedIndexedLogsUsageOk() (*float64, bool) {
- if o == nil || o.EstimatedIndexedLogsUsage == nil {
- return nil, false
- }
- return o.EstimatedIndexedLogsUsage, true
-}
-
-// HasEstimatedIndexedLogsUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedIndexedLogsUsage() bool {
- return o != nil && o.EstimatedIndexedLogsUsage != nil
-}
-
-// SetEstimatedIndexedLogsUsage gets a reference to the given float64 and assigns it to the EstimatedIndexedLogsUsage field.
-func (o *UsageAttributionValues) SetEstimatedIndexedLogsUsage(v float64) {
- o.EstimatedIndexedLogsUsage = &v
-}
-
-// GetEstimatedIndexedSpansPercentage returns the EstimatedIndexedSpansPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedIndexedSpansPercentage() float64 {
- if o == nil || o.EstimatedIndexedSpansPercentage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedIndexedSpansPercentage
-}
-
-// GetEstimatedIndexedSpansPercentageOk returns a tuple with the EstimatedIndexedSpansPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedIndexedSpansPercentageOk() (*float64, bool) {
- if o == nil || o.EstimatedIndexedSpansPercentage == nil {
- return nil, false
- }
- return o.EstimatedIndexedSpansPercentage, true
-}
-
-// HasEstimatedIndexedSpansPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedIndexedSpansPercentage() bool {
- return o != nil && o.EstimatedIndexedSpansPercentage != nil
-}
-
-// SetEstimatedIndexedSpansPercentage gets a reference to the given float64 and assigns it to the EstimatedIndexedSpansPercentage field.
-func (o *UsageAttributionValues) SetEstimatedIndexedSpansPercentage(v float64) {
- o.EstimatedIndexedSpansPercentage = &v
-}
-
-// GetEstimatedIndexedSpansUsage returns the EstimatedIndexedSpansUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedIndexedSpansUsage() float64 {
- if o == nil || o.EstimatedIndexedSpansUsage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedIndexedSpansUsage
-}
-
-// GetEstimatedIndexedSpansUsageOk returns a tuple with the EstimatedIndexedSpansUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedIndexedSpansUsageOk() (*float64, bool) {
- if o == nil || o.EstimatedIndexedSpansUsage == nil {
- return nil, false
- }
- return o.EstimatedIndexedSpansUsage, true
-}
-
-// HasEstimatedIndexedSpansUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedIndexedSpansUsage() bool {
- return o != nil && o.EstimatedIndexedSpansUsage != nil
-}
-
-// SetEstimatedIndexedSpansUsage gets a reference to the given float64 and assigns it to the EstimatedIndexedSpansUsage field.
-func (o *UsageAttributionValues) SetEstimatedIndexedSpansUsage(v float64) {
- o.EstimatedIndexedSpansUsage = &v
-}
-
-// GetEstimatedIngestedLogsPercentage returns the EstimatedIngestedLogsPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedIngestedLogsPercentage() float64 {
- if o == nil || o.EstimatedIngestedLogsPercentage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedIngestedLogsPercentage
-}
-
-// GetEstimatedIngestedLogsPercentageOk returns a tuple with the EstimatedIngestedLogsPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedIngestedLogsPercentageOk() (*float64, bool) {
- if o == nil || o.EstimatedIngestedLogsPercentage == nil {
- return nil, false
- }
- return o.EstimatedIngestedLogsPercentage, true
-}
-
-// HasEstimatedIngestedLogsPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedIngestedLogsPercentage() bool {
- return o != nil && o.EstimatedIngestedLogsPercentage != nil
-}
-
-// SetEstimatedIngestedLogsPercentage gets a reference to the given float64 and assigns it to the EstimatedIngestedLogsPercentage field.
-func (o *UsageAttributionValues) SetEstimatedIngestedLogsPercentage(v float64) {
- o.EstimatedIngestedLogsPercentage = &v
-}
-
-// GetEstimatedIngestedLogsUsage returns the EstimatedIngestedLogsUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedIngestedLogsUsage() float64 {
- if o == nil || o.EstimatedIngestedLogsUsage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedIngestedLogsUsage
-}
-
-// GetEstimatedIngestedLogsUsageOk returns a tuple with the EstimatedIngestedLogsUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedIngestedLogsUsageOk() (*float64, bool) {
- if o == nil || o.EstimatedIngestedLogsUsage == nil {
- return nil, false
- }
- return o.EstimatedIngestedLogsUsage, true
-}
-
-// HasEstimatedIngestedLogsUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedIngestedLogsUsage() bool {
- return o != nil && o.EstimatedIngestedLogsUsage != nil
-}
-
-// SetEstimatedIngestedLogsUsage gets a reference to the given float64 and assigns it to the EstimatedIngestedLogsUsage field.
-func (o *UsageAttributionValues) SetEstimatedIngestedLogsUsage(v float64) {
- o.EstimatedIngestedLogsUsage = &v
-}
-
-// GetEstimatedIngestedSpansPercentage returns the EstimatedIngestedSpansPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedIngestedSpansPercentage() float64 {
- if o == nil || o.EstimatedIngestedSpansPercentage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedIngestedSpansPercentage
-}
-
-// GetEstimatedIngestedSpansPercentageOk returns a tuple with the EstimatedIngestedSpansPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedIngestedSpansPercentageOk() (*float64, bool) {
- if o == nil || o.EstimatedIngestedSpansPercentage == nil {
- return nil, false
- }
- return o.EstimatedIngestedSpansPercentage, true
-}
-
-// HasEstimatedIngestedSpansPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedIngestedSpansPercentage() bool {
- return o != nil && o.EstimatedIngestedSpansPercentage != nil
-}
-
-// SetEstimatedIngestedSpansPercentage gets a reference to the given float64 and assigns it to the EstimatedIngestedSpansPercentage field.
-func (o *UsageAttributionValues) SetEstimatedIngestedSpansPercentage(v float64) {
- o.EstimatedIngestedSpansPercentage = &v
-}
-
-// GetEstimatedIngestedSpansUsage returns the EstimatedIngestedSpansUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedIngestedSpansUsage() float64 {
- if o == nil || o.EstimatedIngestedSpansUsage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedIngestedSpansUsage
-}
-
-// GetEstimatedIngestedSpansUsageOk returns a tuple with the EstimatedIngestedSpansUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedIngestedSpansUsageOk() (*float64, bool) {
- if o == nil || o.EstimatedIngestedSpansUsage == nil {
- return nil, false
- }
- return o.EstimatedIngestedSpansUsage, true
-}
-
-// HasEstimatedIngestedSpansUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedIngestedSpansUsage() bool {
- return o != nil && o.EstimatedIngestedSpansUsage != nil
-}
-
-// SetEstimatedIngestedSpansUsage gets a reference to the given float64 and assigns it to the EstimatedIngestedSpansUsage field.
-func (o *UsageAttributionValues) SetEstimatedIngestedSpansUsage(v float64) {
- o.EstimatedIngestedSpansUsage = &v
-}
-
-// GetEstimatedRumSessionsPercentage returns the EstimatedRumSessionsPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedRumSessionsPercentage() float64 {
- if o == nil || o.EstimatedRumSessionsPercentage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedRumSessionsPercentage
-}
-
-// GetEstimatedRumSessionsPercentageOk returns a tuple with the EstimatedRumSessionsPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedRumSessionsPercentageOk() (*float64, bool) {
- if o == nil || o.EstimatedRumSessionsPercentage == nil {
- return nil, false
- }
- return o.EstimatedRumSessionsPercentage, true
-}
-
-// HasEstimatedRumSessionsPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedRumSessionsPercentage() bool {
- return o != nil && o.EstimatedRumSessionsPercentage != nil
-}
-
-// SetEstimatedRumSessionsPercentage gets a reference to the given float64 and assigns it to the EstimatedRumSessionsPercentage field.
-func (o *UsageAttributionValues) SetEstimatedRumSessionsPercentage(v float64) {
- o.EstimatedRumSessionsPercentage = &v
-}
-
-// GetEstimatedRumSessionsUsage returns the EstimatedRumSessionsUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetEstimatedRumSessionsUsage() float64 {
- if o == nil || o.EstimatedRumSessionsUsage == nil {
- var ret float64
- return ret
- }
- return *o.EstimatedRumSessionsUsage
-}
-
-// GetEstimatedRumSessionsUsageOk returns a tuple with the EstimatedRumSessionsUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetEstimatedRumSessionsUsageOk() (*float64, bool) {
- if o == nil || o.EstimatedRumSessionsUsage == nil {
- return nil, false
- }
- return o.EstimatedRumSessionsUsage, true
-}
-
-// HasEstimatedRumSessionsUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasEstimatedRumSessionsUsage() bool {
- return o != nil && o.EstimatedRumSessionsUsage != nil
-}
-
-// SetEstimatedRumSessionsUsage gets a reference to the given float64 and assigns it to the EstimatedRumSessionsUsage field.
-func (o *UsageAttributionValues) SetEstimatedRumSessionsUsage(v float64) {
- o.EstimatedRumSessionsUsage = &v
-}
-
-// GetInfraHostPercentage returns the InfraHostPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetInfraHostPercentage() float64 {
- if o == nil || o.InfraHostPercentage == nil {
- var ret float64
- return ret
- }
- return *o.InfraHostPercentage
-}
-
-// GetInfraHostPercentageOk returns a tuple with the InfraHostPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetInfraHostPercentageOk() (*float64, bool) {
- if o == nil || o.InfraHostPercentage == nil {
- return nil, false
- }
- return o.InfraHostPercentage, true
-}
-
-// HasInfraHostPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasInfraHostPercentage() bool {
- return o != nil && o.InfraHostPercentage != nil
-}
-
-// SetInfraHostPercentage gets a reference to the given float64 and assigns it to the InfraHostPercentage field.
-func (o *UsageAttributionValues) SetInfraHostPercentage(v float64) {
- o.InfraHostPercentage = &v
-}
-
-// GetInfraHostUsage returns the InfraHostUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetInfraHostUsage() float64 {
- if o == nil || o.InfraHostUsage == nil {
- var ret float64
- return ret
- }
- return *o.InfraHostUsage
-}
-
-// GetInfraHostUsageOk returns a tuple with the InfraHostUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetInfraHostUsageOk() (*float64, bool) {
- if o == nil || o.InfraHostUsage == nil {
- return nil, false
- }
- return o.InfraHostUsage, true
-}
-
-// HasInfraHostUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasInfraHostUsage() bool {
- return o != nil && o.InfraHostUsage != nil
-}
-
-// SetInfraHostUsage gets a reference to the given float64 and assigns it to the InfraHostUsage field.
-func (o *UsageAttributionValues) SetInfraHostUsage(v float64) {
- o.InfraHostUsage = &v
-}
-
-// GetLambdaFunctionsPercentage returns the LambdaFunctionsPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetLambdaFunctionsPercentage() float64 {
- if o == nil || o.LambdaFunctionsPercentage == nil {
- var ret float64
- return ret
- }
- return *o.LambdaFunctionsPercentage
-}
-
-// GetLambdaFunctionsPercentageOk returns a tuple with the LambdaFunctionsPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetLambdaFunctionsPercentageOk() (*float64, bool) {
- if o == nil || o.LambdaFunctionsPercentage == nil {
- return nil, false
- }
- return o.LambdaFunctionsPercentage, true
-}
-
-// HasLambdaFunctionsPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasLambdaFunctionsPercentage() bool {
- return o != nil && o.LambdaFunctionsPercentage != nil
-}
-
-// SetLambdaFunctionsPercentage gets a reference to the given float64 and assigns it to the LambdaFunctionsPercentage field.
-func (o *UsageAttributionValues) SetLambdaFunctionsPercentage(v float64) {
- o.LambdaFunctionsPercentage = &v
-}
-
-// GetLambdaFunctionsUsage returns the LambdaFunctionsUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetLambdaFunctionsUsage() float64 {
- if o == nil || o.LambdaFunctionsUsage == nil {
- var ret float64
- return ret
- }
- return *o.LambdaFunctionsUsage
-}
-
-// GetLambdaFunctionsUsageOk returns a tuple with the LambdaFunctionsUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetLambdaFunctionsUsageOk() (*float64, bool) {
- if o == nil || o.LambdaFunctionsUsage == nil {
- return nil, false
- }
- return o.LambdaFunctionsUsage, true
-}
-
-// HasLambdaFunctionsUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasLambdaFunctionsUsage() bool {
- return o != nil && o.LambdaFunctionsUsage != nil
-}
-
-// SetLambdaFunctionsUsage gets a reference to the given float64 and assigns it to the LambdaFunctionsUsage field.
-func (o *UsageAttributionValues) SetLambdaFunctionsUsage(v float64) {
- o.LambdaFunctionsUsage = &v
-}
-
-// GetLambdaInvocationsPercentage returns the LambdaInvocationsPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetLambdaInvocationsPercentage() float64 {
- if o == nil || o.LambdaInvocationsPercentage == nil {
- var ret float64
- return ret
- }
- return *o.LambdaInvocationsPercentage
-}
-
-// GetLambdaInvocationsPercentageOk returns a tuple with the LambdaInvocationsPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetLambdaInvocationsPercentageOk() (*float64, bool) {
- if o == nil || o.LambdaInvocationsPercentage == nil {
- return nil, false
- }
- return o.LambdaInvocationsPercentage, true
-}
-
-// HasLambdaInvocationsPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasLambdaInvocationsPercentage() bool {
- return o != nil && o.LambdaInvocationsPercentage != nil
-}
-
-// SetLambdaInvocationsPercentage gets a reference to the given float64 and assigns it to the LambdaInvocationsPercentage field.
-func (o *UsageAttributionValues) SetLambdaInvocationsPercentage(v float64) {
- o.LambdaInvocationsPercentage = &v
-}
-
-// GetLambdaInvocationsUsage returns the LambdaInvocationsUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetLambdaInvocationsUsage() float64 {
- if o == nil || o.LambdaInvocationsUsage == nil {
- var ret float64
- return ret
- }
- return *o.LambdaInvocationsUsage
-}
-
-// GetLambdaInvocationsUsageOk returns a tuple with the LambdaInvocationsUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetLambdaInvocationsUsageOk() (*float64, bool) {
- if o == nil || o.LambdaInvocationsUsage == nil {
- return nil, false
- }
- return o.LambdaInvocationsUsage, true
-}
-
-// HasLambdaInvocationsUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasLambdaInvocationsUsage() bool {
- return o != nil && o.LambdaInvocationsUsage != nil
-}
-
-// SetLambdaInvocationsUsage gets a reference to the given float64 and assigns it to the LambdaInvocationsUsage field.
-func (o *UsageAttributionValues) SetLambdaInvocationsUsage(v float64) {
- o.LambdaInvocationsUsage = &v
-}
-
-// GetNpmHostPercentage returns the NpmHostPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetNpmHostPercentage() float64 {
- if o == nil || o.NpmHostPercentage == nil {
- var ret float64
- return ret
- }
- return *o.NpmHostPercentage
-}
-
-// GetNpmHostPercentageOk returns a tuple with the NpmHostPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetNpmHostPercentageOk() (*float64, bool) {
- if o == nil || o.NpmHostPercentage == nil {
- return nil, false
- }
- return o.NpmHostPercentage, true
-}
-
-// HasNpmHostPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasNpmHostPercentage() bool {
- return o != nil && o.NpmHostPercentage != nil
-}
-
-// SetNpmHostPercentage gets a reference to the given float64 and assigns it to the NpmHostPercentage field.
-func (o *UsageAttributionValues) SetNpmHostPercentage(v float64) {
- o.NpmHostPercentage = &v
-}
-
-// GetNpmHostUsage returns the NpmHostUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetNpmHostUsage() float64 {
- if o == nil || o.NpmHostUsage == nil {
- var ret float64
- return ret
- }
- return *o.NpmHostUsage
-}
-
-// GetNpmHostUsageOk returns a tuple with the NpmHostUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetNpmHostUsageOk() (*float64, bool) {
- if o == nil || o.NpmHostUsage == nil {
- return nil, false
- }
- return o.NpmHostUsage, true
-}
-
-// HasNpmHostUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasNpmHostUsage() bool {
- return o != nil && o.NpmHostUsage != nil
-}
-
-// SetNpmHostUsage gets a reference to the given float64 and assigns it to the NpmHostUsage field.
-func (o *UsageAttributionValues) SetNpmHostUsage(v float64) {
- o.NpmHostUsage = &v
-}
-
-// GetProfiledContainerPercentage returns the ProfiledContainerPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetProfiledContainerPercentage() float64 {
- if o == nil || o.ProfiledContainerPercentage == nil {
- var ret float64
- return ret
- }
- return *o.ProfiledContainerPercentage
-}
-
-// GetProfiledContainerPercentageOk returns a tuple with the ProfiledContainerPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetProfiledContainerPercentageOk() (*float64, bool) {
- if o == nil || o.ProfiledContainerPercentage == nil {
- return nil, false
- }
- return o.ProfiledContainerPercentage, true
-}
-
-// HasProfiledContainerPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasProfiledContainerPercentage() bool {
- return o != nil && o.ProfiledContainerPercentage != nil
-}
-
-// SetProfiledContainerPercentage gets a reference to the given float64 and assigns it to the ProfiledContainerPercentage field.
-func (o *UsageAttributionValues) SetProfiledContainerPercentage(v float64) {
- o.ProfiledContainerPercentage = &v
-}
-
-// GetProfiledContainerUsage returns the ProfiledContainerUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetProfiledContainerUsage() float64 {
- if o == nil || o.ProfiledContainerUsage == nil {
- var ret float64
- return ret
- }
- return *o.ProfiledContainerUsage
-}
-
-// GetProfiledContainerUsageOk returns a tuple with the ProfiledContainerUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetProfiledContainerUsageOk() (*float64, bool) {
- if o == nil || o.ProfiledContainerUsage == nil {
- return nil, false
- }
- return o.ProfiledContainerUsage, true
-}
-
-// HasProfiledContainerUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasProfiledContainerUsage() bool {
- return o != nil && o.ProfiledContainerUsage != nil
-}
-
-// SetProfiledContainerUsage gets a reference to the given float64 and assigns it to the ProfiledContainerUsage field.
-func (o *UsageAttributionValues) SetProfiledContainerUsage(v float64) {
- o.ProfiledContainerUsage = &v
-}
-
-// GetProfiledHostsPercentage returns the ProfiledHostsPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetProfiledHostsPercentage() float64 {
- if o == nil || o.ProfiledHostsPercentage == nil {
- var ret float64
- return ret
- }
- return *o.ProfiledHostsPercentage
-}
-
-// GetProfiledHostsPercentageOk returns a tuple with the ProfiledHostsPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetProfiledHostsPercentageOk() (*float64, bool) {
- if o == nil || o.ProfiledHostsPercentage == nil {
- return nil, false
- }
- return o.ProfiledHostsPercentage, true
-}
-
-// HasProfiledHostsPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasProfiledHostsPercentage() bool {
- return o != nil && o.ProfiledHostsPercentage != nil
-}
-
-// SetProfiledHostsPercentage gets a reference to the given float64 and assigns it to the ProfiledHostsPercentage field.
-func (o *UsageAttributionValues) SetProfiledHostsPercentage(v float64) {
- o.ProfiledHostsPercentage = &v
-}
-
-// GetProfiledHostsUsage returns the ProfiledHostsUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetProfiledHostsUsage() float64 {
- if o == nil || o.ProfiledHostsUsage == nil {
- var ret float64
- return ret
- }
- return *o.ProfiledHostsUsage
-}
-
-// GetProfiledHostsUsageOk returns a tuple with the ProfiledHostsUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetProfiledHostsUsageOk() (*float64, bool) {
- if o == nil || o.ProfiledHostsUsage == nil {
- return nil, false
- }
- return o.ProfiledHostsUsage, true
-}
-
-// HasProfiledHostsUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasProfiledHostsUsage() bool {
- return o != nil && o.ProfiledHostsUsage != nil
-}
-
-// SetProfiledHostsUsage gets a reference to the given float64 and assigns it to the ProfiledHostsUsage field.
-func (o *UsageAttributionValues) SetProfiledHostsUsage(v float64) {
- o.ProfiledHostsUsage = &v
-}
-
-// GetSnmpPercentage returns the SnmpPercentage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetSnmpPercentage() float64 {
- if o == nil || o.SnmpPercentage == nil {
- var ret float64
- return ret
- }
- return *o.SnmpPercentage
-}
-
-// GetSnmpPercentageOk returns a tuple with the SnmpPercentage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetSnmpPercentageOk() (*float64, bool) {
- if o == nil || o.SnmpPercentage == nil {
- return nil, false
- }
- return o.SnmpPercentage, true
-}
-
-// HasSnmpPercentage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasSnmpPercentage() bool {
- return o != nil && o.SnmpPercentage != nil
-}
-
-// SetSnmpPercentage gets a reference to the given float64 and assigns it to the SnmpPercentage field.
-func (o *UsageAttributionValues) SetSnmpPercentage(v float64) {
- o.SnmpPercentage = &v
-}
-
-// GetSnmpUsage returns the SnmpUsage field value if set, zero value otherwise.
-func (o *UsageAttributionValues) GetSnmpUsage() float64 {
- if o == nil || o.SnmpUsage == nil {
- var ret float64
- return ret
- }
- return *o.SnmpUsage
-}
-
-// GetSnmpUsageOk returns a tuple with the SnmpUsage field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *UsageAttributionValues) GetSnmpUsageOk() (*float64, bool) {
- if o == nil || o.SnmpUsage == nil {
- return nil, false
- }
- return o.SnmpUsage, true
-}
-
-// HasSnmpUsage returns a boolean if a field has been set.
-func (o *UsageAttributionValues) HasSnmpUsage() bool {
- return o != nil && o.SnmpUsage != nil
-}
-
-// SetSnmpUsage gets a reference to the given float64 and assigns it to the SnmpUsage field.
-func (o *UsageAttributionValues) SetSnmpUsage(v float64) {
- o.SnmpUsage = &v
-}
-
-// MarshalJSON serializes the struct using spec logic.
-func (o UsageAttributionValues) MarshalJSON() ([]byte, error) {
- toSerialize := map[string]interface{}{}
- if o.UnparsedObject != nil {
- return datadog.Marshal(o.UnparsedObject)
- }
- if o.ApiPercentage != nil {
- toSerialize["api_percentage"] = o.ApiPercentage
- }
- if o.ApiUsage != nil {
- toSerialize["api_usage"] = o.ApiUsage
- }
- if o.ApmFargatePercentage != nil {
- toSerialize["apm_fargate_percentage"] = o.ApmFargatePercentage
- }
- if o.ApmFargateUsage != nil {
- toSerialize["apm_fargate_usage"] = o.ApmFargateUsage
- }
- if o.ApmHostPercentage != nil {
- toSerialize["apm_host_percentage"] = o.ApmHostPercentage
- }
- if o.ApmHostUsage != nil {
- toSerialize["apm_host_usage"] = o.ApmHostUsage
- }
- if o.AppsecFargatePercentage != nil {
- toSerialize["appsec_fargate_percentage"] = o.AppsecFargatePercentage
- }
- if o.AppsecFargateUsage != nil {
- toSerialize["appsec_fargate_usage"] = o.AppsecFargateUsage
- }
- if o.AppsecPercentage != nil {
- toSerialize["appsec_percentage"] = o.AppsecPercentage
- }
- if o.AppsecUsage != nil {
- toSerialize["appsec_usage"] = o.AppsecUsage
- }
- if o.BrowserPercentage != nil {
- toSerialize["browser_percentage"] = o.BrowserPercentage
- }
- if o.BrowserUsage != nil {
- toSerialize["browser_usage"] = o.BrowserUsage
- }
- if o.ContainerPercentage != nil {
- toSerialize["container_percentage"] = o.ContainerPercentage
- }
- if o.ContainerUsage != nil {
- toSerialize["container_usage"] = o.ContainerUsage
- }
- if o.CspmContainerPercentage != nil {
- toSerialize["cspm_container_percentage"] = o.CspmContainerPercentage
- }
- if o.CspmContainerUsage != nil {
- toSerialize["cspm_container_usage"] = o.CspmContainerUsage
- }
- if o.CspmHostPercentage != nil {
- toSerialize["cspm_host_percentage"] = o.CspmHostPercentage
- }
- if o.CspmHostUsage != nil {
- toSerialize["cspm_host_usage"] = o.CspmHostUsage
- }
- if o.CustomTimeseriesPercentage != nil {
- toSerialize["custom_timeseries_percentage"] = o.CustomTimeseriesPercentage
- }
- if o.CustomTimeseriesUsage != nil {
- toSerialize["custom_timeseries_usage"] = o.CustomTimeseriesUsage
- }
- if o.CwsContainerPercentage != nil {
- toSerialize["cws_container_percentage"] = o.CwsContainerPercentage
- }
- if o.CwsContainerUsage != nil {
- toSerialize["cws_container_usage"] = o.CwsContainerUsage
- }
- if o.CwsHostPercentage != nil {
- toSerialize["cws_host_percentage"] = o.CwsHostPercentage
- }
- if o.CwsHostUsage != nil {
- toSerialize["cws_host_usage"] = o.CwsHostUsage
- }
- if o.DbmHostsPercentage != nil {
- toSerialize["dbm_hosts_percentage"] = o.DbmHostsPercentage
- }
- if o.DbmHostsUsage != nil {
- toSerialize["dbm_hosts_usage"] = o.DbmHostsUsage
- }
- if o.DbmQueriesPercentage != nil {
- toSerialize["dbm_queries_percentage"] = o.DbmQueriesPercentage
- }
- if o.DbmQueriesUsage != nil {
- toSerialize["dbm_queries_usage"] = o.DbmQueriesUsage
- }
- if o.EstimatedIndexedLogsPercentage != nil {
- toSerialize["estimated_indexed_logs_percentage"] = o.EstimatedIndexedLogsPercentage
- }
- if o.EstimatedIndexedLogsUsage != nil {
- toSerialize["estimated_indexed_logs_usage"] = o.EstimatedIndexedLogsUsage
- }
- if o.EstimatedIndexedSpansPercentage != nil {
- toSerialize["estimated_indexed_spans_percentage"] = o.EstimatedIndexedSpansPercentage
- }
- if o.EstimatedIndexedSpansUsage != nil {
- toSerialize["estimated_indexed_spans_usage"] = o.EstimatedIndexedSpansUsage
- }
- if o.EstimatedIngestedLogsPercentage != nil {
- toSerialize["estimated_ingested_logs_percentage"] = o.EstimatedIngestedLogsPercentage
- }
- if o.EstimatedIngestedLogsUsage != nil {
- toSerialize["estimated_ingested_logs_usage"] = o.EstimatedIngestedLogsUsage
- }
- if o.EstimatedIngestedSpansPercentage != nil {
- toSerialize["estimated_ingested_spans_percentage"] = o.EstimatedIngestedSpansPercentage
- }
- if o.EstimatedIngestedSpansUsage != nil {
- toSerialize["estimated_ingested_spans_usage"] = o.EstimatedIngestedSpansUsage
- }
- if o.EstimatedRumSessionsPercentage != nil {
- toSerialize["estimated_rum_sessions_percentage"] = o.EstimatedRumSessionsPercentage
- }
- if o.EstimatedRumSessionsUsage != nil {
- toSerialize["estimated_rum_sessions_usage"] = o.EstimatedRumSessionsUsage
- }
- if o.InfraHostPercentage != nil {
- toSerialize["infra_host_percentage"] = o.InfraHostPercentage
- }
- if o.InfraHostUsage != nil {
- toSerialize["infra_host_usage"] = o.InfraHostUsage
- }
- if o.LambdaFunctionsPercentage != nil {
- toSerialize["lambda_functions_percentage"] = o.LambdaFunctionsPercentage
- }
- if o.LambdaFunctionsUsage != nil {
- toSerialize["lambda_functions_usage"] = o.LambdaFunctionsUsage
- }
- if o.LambdaInvocationsPercentage != nil {
- toSerialize["lambda_invocations_percentage"] = o.LambdaInvocationsPercentage
- }
- if o.LambdaInvocationsUsage != nil {
- toSerialize["lambda_invocations_usage"] = o.LambdaInvocationsUsage
- }
- if o.NpmHostPercentage != nil {
- toSerialize["npm_host_percentage"] = o.NpmHostPercentage
- }
- if o.NpmHostUsage != nil {
- toSerialize["npm_host_usage"] = o.NpmHostUsage
- }
- if o.ProfiledContainerPercentage != nil {
- toSerialize["profiled_container_percentage"] = o.ProfiledContainerPercentage
- }
- if o.ProfiledContainerUsage != nil {
- toSerialize["profiled_container_usage"] = o.ProfiledContainerUsage
- }
- if o.ProfiledHostsPercentage != nil {
- toSerialize["profiled_hosts_percentage"] = o.ProfiledHostsPercentage
- }
- if o.ProfiledHostsUsage != nil {
- toSerialize["profiled_hosts_usage"] = o.ProfiledHostsUsage
- }
- if o.SnmpPercentage != nil {
- toSerialize["snmp_percentage"] = o.SnmpPercentage
- }
- if o.SnmpUsage != nil {
- toSerialize["snmp_usage"] = o.SnmpUsage
- }
-
- for key, value := range o.AdditionalProperties {
- toSerialize[key] = value
- }
- return datadog.Marshal(toSerialize)
-}
-
-// UnmarshalJSON deserializes the given payload.
-func (o *UsageAttributionValues) UnmarshalJSON(bytes []byte) (err error) {
- all := struct {
- ApiPercentage *float64 `json:"api_percentage,omitempty"`
- ApiUsage *float64 `json:"api_usage,omitempty"`
- ApmFargatePercentage *float64 `json:"apm_fargate_percentage,omitempty"`
- ApmFargateUsage *float64 `json:"apm_fargate_usage,omitempty"`
- ApmHostPercentage *float64 `json:"apm_host_percentage,omitempty"`
- ApmHostUsage *float64 `json:"apm_host_usage,omitempty"`
- AppsecFargatePercentage *float64 `json:"appsec_fargate_percentage,omitempty"`
- AppsecFargateUsage *float64 `json:"appsec_fargate_usage,omitempty"`
- AppsecPercentage *float64 `json:"appsec_percentage,omitempty"`
- AppsecUsage *float64 `json:"appsec_usage,omitempty"`
- BrowserPercentage *float64 `json:"browser_percentage,omitempty"`
- BrowserUsage *float64 `json:"browser_usage,omitempty"`
- ContainerPercentage *float64 `json:"container_percentage,omitempty"`
- ContainerUsage *float64 `json:"container_usage,omitempty"`
- CspmContainerPercentage *float64 `json:"cspm_container_percentage,omitempty"`
- CspmContainerUsage *float64 `json:"cspm_container_usage,omitempty"`
- CspmHostPercentage *float64 `json:"cspm_host_percentage,omitempty"`
- CspmHostUsage *float64 `json:"cspm_host_usage,omitempty"`
- CustomTimeseriesPercentage *float64 `json:"custom_timeseries_percentage,omitempty"`
- CustomTimeseriesUsage *float64 `json:"custom_timeseries_usage,omitempty"`
- CwsContainerPercentage *float64 `json:"cws_container_percentage,omitempty"`
- CwsContainerUsage *float64 `json:"cws_container_usage,omitempty"`
- CwsHostPercentage *float64 `json:"cws_host_percentage,omitempty"`
- CwsHostUsage *float64 `json:"cws_host_usage,omitempty"`
- DbmHostsPercentage *float64 `json:"dbm_hosts_percentage,omitempty"`
- DbmHostsUsage *float64 `json:"dbm_hosts_usage,omitempty"`
- DbmQueriesPercentage *float64 `json:"dbm_queries_percentage,omitempty"`
- DbmQueriesUsage *float64 `json:"dbm_queries_usage,omitempty"`
- EstimatedIndexedLogsPercentage *float64 `json:"estimated_indexed_logs_percentage,omitempty"`
- EstimatedIndexedLogsUsage *float64 `json:"estimated_indexed_logs_usage,omitempty"`
- EstimatedIndexedSpansPercentage *float64 `json:"estimated_indexed_spans_percentage,omitempty"`
- EstimatedIndexedSpansUsage *float64 `json:"estimated_indexed_spans_usage,omitempty"`
- EstimatedIngestedLogsPercentage *float64 `json:"estimated_ingested_logs_percentage,omitempty"`
- EstimatedIngestedLogsUsage *float64 `json:"estimated_ingested_logs_usage,omitempty"`
- EstimatedIngestedSpansPercentage *float64 `json:"estimated_ingested_spans_percentage,omitempty"`
- EstimatedIngestedSpansUsage *float64 `json:"estimated_ingested_spans_usage,omitempty"`
- EstimatedRumSessionsPercentage *float64 `json:"estimated_rum_sessions_percentage,omitempty"`
- EstimatedRumSessionsUsage *float64 `json:"estimated_rum_sessions_usage,omitempty"`
- InfraHostPercentage *float64 `json:"infra_host_percentage,omitempty"`
- InfraHostUsage *float64 `json:"infra_host_usage,omitempty"`
- LambdaFunctionsPercentage *float64 `json:"lambda_functions_percentage,omitempty"`
- LambdaFunctionsUsage *float64 `json:"lambda_functions_usage,omitempty"`
- LambdaInvocationsPercentage *float64 `json:"lambda_invocations_percentage,omitempty"`
- LambdaInvocationsUsage *float64 `json:"lambda_invocations_usage,omitempty"`
- NpmHostPercentage *float64 `json:"npm_host_percentage,omitempty"`
- NpmHostUsage *float64 `json:"npm_host_usage,omitempty"`
- ProfiledContainerPercentage *float64 `json:"profiled_container_percentage,omitempty"`
- ProfiledContainerUsage *float64 `json:"profiled_container_usage,omitempty"`
- ProfiledHostsPercentage *float64 `json:"profiled_hosts_percentage,omitempty"`
- ProfiledHostsUsage *float64 `json:"profiled_hosts_usage,omitempty"`
- SnmpPercentage *float64 `json:"snmp_percentage,omitempty"`
- SnmpUsage *float64 `json:"snmp_usage,omitempty"`
- }{}
- if err = datadog.Unmarshal(bytes, &all); err != nil {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
- }
- additionalProperties := make(map[string]interface{})
- if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"api_percentage", "api_usage", "apm_fargate_percentage", "apm_fargate_usage", "apm_host_percentage", "apm_host_usage", "appsec_fargate_percentage", "appsec_fargate_usage", "appsec_percentage", "appsec_usage", "browser_percentage", "browser_usage", "container_percentage", "container_usage", "cspm_container_percentage", "cspm_container_usage", "cspm_host_percentage", "cspm_host_usage", "custom_timeseries_percentage", "custom_timeseries_usage", "cws_container_percentage", "cws_container_usage", "cws_host_percentage", "cws_host_usage", "dbm_hosts_percentage", "dbm_hosts_usage", "dbm_queries_percentage", "dbm_queries_usage", "estimated_indexed_logs_percentage", "estimated_indexed_logs_usage", "estimated_indexed_spans_percentage", "estimated_indexed_spans_usage", "estimated_ingested_logs_percentage", "estimated_ingested_logs_usage", "estimated_ingested_spans_percentage", "estimated_ingested_spans_usage", "estimated_rum_sessions_percentage", "estimated_rum_sessions_usage", "infra_host_percentage", "infra_host_usage", "lambda_functions_percentage", "lambda_functions_usage", "lambda_invocations_percentage", "lambda_invocations_usage", "npm_host_percentage", "npm_host_usage", "profiled_container_percentage", "profiled_container_usage", "profiled_hosts_percentage", "profiled_hosts_usage", "snmp_percentage", "snmp_usage"})
- } else {
- return err
- }
- o.ApiPercentage = all.ApiPercentage
- o.ApiUsage = all.ApiUsage
- o.ApmFargatePercentage = all.ApmFargatePercentage
- o.ApmFargateUsage = all.ApmFargateUsage
- o.ApmHostPercentage = all.ApmHostPercentage
- o.ApmHostUsage = all.ApmHostUsage
- o.AppsecFargatePercentage = all.AppsecFargatePercentage
- o.AppsecFargateUsage = all.AppsecFargateUsage
- o.AppsecPercentage = all.AppsecPercentage
- o.AppsecUsage = all.AppsecUsage
- o.BrowserPercentage = all.BrowserPercentage
- o.BrowserUsage = all.BrowserUsage
- o.ContainerPercentage = all.ContainerPercentage
- o.ContainerUsage = all.ContainerUsage
- o.CspmContainerPercentage = all.CspmContainerPercentage
- o.CspmContainerUsage = all.CspmContainerUsage
- o.CspmHostPercentage = all.CspmHostPercentage
- o.CspmHostUsage = all.CspmHostUsage
- o.CustomTimeseriesPercentage = all.CustomTimeseriesPercentage
- o.CustomTimeseriesUsage = all.CustomTimeseriesUsage
- o.CwsContainerPercentage = all.CwsContainerPercentage
- o.CwsContainerUsage = all.CwsContainerUsage
- o.CwsHostPercentage = all.CwsHostPercentage
- o.CwsHostUsage = all.CwsHostUsage
- o.DbmHostsPercentage = all.DbmHostsPercentage
- o.DbmHostsUsage = all.DbmHostsUsage
- o.DbmQueriesPercentage = all.DbmQueriesPercentage
- o.DbmQueriesUsage = all.DbmQueriesUsage
- o.EstimatedIndexedLogsPercentage = all.EstimatedIndexedLogsPercentage
- o.EstimatedIndexedLogsUsage = all.EstimatedIndexedLogsUsage
- o.EstimatedIndexedSpansPercentage = all.EstimatedIndexedSpansPercentage
- o.EstimatedIndexedSpansUsage = all.EstimatedIndexedSpansUsage
- o.EstimatedIngestedLogsPercentage = all.EstimatedIngestedLogsPercentage
- o.EstimatedIngestedLogsUsage = all.EstimatedIngestedLogsUsage
- o.EstimatedIngestedSpansPercentage = all.EstimatedIngestedSpansPercentage
- o.EstimatedIngestedSpansUsage = all.EstimatedIngestedSpansUsage
- o.EstimatedRumSessionsPercentage = all.EstimatedRumSessionsPercentage
- o.EstimatedRumSessionsUsage = all.EstimatedRumSessionsUsage
- o.InfraHostPercentage = all.InfraHostPercentage
- o.InfraHostUsage = all.InfraHostUsage
- o.LambdaFunctionsPercentage = all.LambdaFunctionsPercentage
- o.LambdaFunctionsUsage = all.LambdaFunctionsUsage
- o.LambdaInvocationsPercentage = all.LambdaInvocationsPercentage
- o.LambdaInvocationsUsage = all.LambdaInvocationsUsage
- o.NpmHostPercentage = all.NpmHostPercentage
- o.NpmHostUsage = all.NpmHostUsage
- o.ProfiledContainerPercentage = all.ProfiledContainerPercentage
- o.ProfiledContainerUsage = all.ProfiledContainerUsage
- o.ProfiledHostsPercentage = all.ProfiledHostsPercentage
- o.ProfiledHostsUsage = all.ProfiledHostsUsage
- o.SnmpPercentage = all.SnmpPercentage
- o.SnmpUsage = all.SnmpUsage
-
- if len(additionalProperties) > 0 {
- o.AdditionalProperties = additionalProperties
- }
-
- return nil
-}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_billable_summary_keys.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_billable_summary_keys.go
index b7e14182a0..aa0bdea2a5 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_billable_summary_keys.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV1/model_usage_billable_summary_keys.go
@@ -115,6 +115,8 @@ type UsageBillableSummaryKeys struct {
// Response with properties for each aggregated usage type.
LogsIndexed180daySum *UsageBillableSummaryBody `json:"logs_indexed_180day_sum,omitempty"`
// Response with properties for each aggregated usage type.
+ LogsIndexed1daySum *UsageBillableSummaryBody `json:"logs_indexed_1day_sum,omitempty"`
+ // Response with properties for each aggregated usage type.
LogsIndexed30daySum *UsageBillableSummaryBody `json:"logs_indexed_30day_sum,omitempty"`
// Response with properties for each aggregated usage type.
LogsIndexed360daySum *UsageBillableSummaryBody `json:"logs_indexed_360day_sum,omitempty"`
@@ -1664,6 +1666,34 @@ func (o *UsageBillableSummaryKeys) SetLogsIndexed180daySum(v UsageBillableSummar
o.LogsIndexed180daySum = &v
}
+// GetLogsIndexed1daySum returns the LogsIndexed1daySum field value if set, zero value otherwise.
+func (o *UsageBillableSummaryKeys) GetLogsIndexed1daySum() UsageBillableSummaryBody {
+ if o == nil || o.LogsIndexed1daySum == nil {
+ var ret UsageBillableSummaryBody
+ return ret
+ }
+ return *o.LogsIndexed1daySum
+}
+
+// GetLogsIndexed1daySumOk returns a tuple with the LogsIndexed1daySum field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *UsageBillableSummaryKeys) GetLogsIndexed1daySumOk() (*UsageBillableSummaryBody, bool) {
+ if o == nil || o.LogsIndexed1daySum == nil {
+ return nil, false
+ }
+ return o.LogsIndexed1daySum, true
+}
+
+// HasLogsIndexed1daySum returns a boolean if a field has been set.
+func (o *UsageBillableSummaryKeys) HasLogsIndexed1daySum() bool {
+ return o != nil && o.LogsIndexed1daySum != nil
+}
+
+// SetLogsIndexed1daySum gets a reference to the given UsageBillableSummaryBody and assigns it to the LogsIndexed1daySum field.
+func (o *UsageBillableSummaryKeys) SetLogsIndexed1daySum(v UsageBillableSummaryBody) {
+ o.LogsIndexed1daySum = &v
+}
+
// GetLogsIndexed30daySum returns the LogsIndexed30daySum field value if set, zero value otherwise.
func (o *UsageBillableSummaryKeys) GetLogsIndexed30daySum() UsageBillableSummaryBody {
if o == nil || o.LogsIndexed30daySum == nil {
@@ -2834,6 +2864,9 @@ func (o UsageBillableSummaryKeys) MarshalJSON() ([]byte, error) {
if o.LogsIndexed180daySum != nil {
toSerialize["logs_indexed_180day_sum"] = o.LogsIndexed180daySum
}
+ if o.LogsIndexed1daySum != nil {
+ toSerialize["logs_indexed_1day_sum"] = o.LogsIndexed1daySum
+ }
if o.LogsIndexed30daySum != nil {
toSerialize["logs_indexed_30day_sum"] = o.LogsIndexed30daySum
}
@@ -3004,6 +3037,7 @@ func (o *UsageBillableSummaryKeys) UnmarshalJSON(bytes []byte) (err error) {
LogsForwardingSum *UsageBillableSummaryBody `json:"logs_forwarding_sum,omitempty"`
LogsIndexed15daySum *UsageBillableSummaryBody `json:"logs_indexed_15day_sum,omitempty"`
LogsIndexed180daySum *UsageBillableSummaryBody `json:"logs_indexed_180day_sum,omitempty"`
+ LogsIndexed1daySum *UsageBillableSummaryBody `json:"logs_indexed_1day_sum,omitempty"`
LogsIndexed30daySum *UsageBillableSummaryBody `json:"logs_indexed_30day_sum,omitempty"`
LogsIndexed360daySum *UsageBillableSummaryBody `json:"logs_indexed_360day_sum,omitempty"`
LogsIndexed3daySum *UsageBillableSummaryBody `json:"logs_indexed_3day_sum,omitempty"`
@@ -3046,7 +3080,7 @@ func (o *UsageBillableSummaryKeys) UnmarshalJSON(bytes []byte) (err error) {
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"apm_fargate_average", "apm_fargate_sum", "apm_host_sum", "apm_host_top99p", "apm_profiler_host_sum", "apm_profiler_host_top99p", "apm_trace_search_sum", "application_security_fargate_average", "application_security_host_sum", "application_security_host_top99p", "ci_pipeline_indexed_spans_sum", "ci_pipeline_maximum", "ci_pipeline_sum", "ci_test_indexed_spans_sum", "ci_testing_maximum", "ci_testing_sum", "cloud_cost_management_average", "cloud_cost_management_sum", "cspm_container_sum", "cspm_host_sum", "cspm_host_top99p", "custom_event_sum", "cws_container_sum", "cws_host_sum", "cws_host_top99p", "dbm_host_sum", "dbm_host_top99p", "dbm_normalized_queries_average", "dbm_normalized_queries_sum", "fargate_container_apm_and_profiler_average", "fargate_container_apm_and_profiler_sum", "fargate_container_average", "fargate_container_profiler_average", "fargate_container_profiler_sum", "fargate_container_sum", "incident_management_maximum", "incident_management_sum", "infra_and_apm_host_sum", "infra_and_apm_host_top99p", "infra_container_sum", "infra_host_sum", "infra_host_top99p", "ingested_spans_sum", "ingested_timeseries_average", "ingested_timeseries_sum", "iot_sum", "iot_top99p", "lambda_function_average", "lambda_function_sum", "logs_forwarding_sum", "logs_indexed_15day_sum", "logs_indexed_180day_sum", "logs_indexed_30day_sum", "logs_indexed_360day_sum", "logs_indexed_3day_sum", "logs_indexed_45day_sum", "logs_indexed_60day_sum", "logs_indexed_7day_sum", "logs_indexed_90day_sum", "logs_indexed_custom_retention_sum", "logs_indexed_sum", "logs_ingested_sum", "network_device_sum", "network_device_top99p", "npm_flow_sum", "npm_host_sum", "npm_host_top99p", "observability_pipeline_sum", "online_archive_sum", "prof_container_sum", "prof_host_sum", "prof_host_top99p", "rum_lite_sum", "rum_replay_sum", "rum_sum", "rum_units_sum", "sensitive_data_scanner_sum", "serverless_apm_sum", "serverless_infra_average", "serverless_infra_sum", "serverless_invocation_sum", "siem_sum", "standard_timeseries_average", "synthetics_api_tests_sum", "synthetics_app_testing_maximum", "synthetics_browser_checks_sum", "timeseries_average", "timeseries_sum"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"apm_fargate_average", "apm_fargate_sum", "apm_host_sum", "apm_host_top99p", "apm_profiler_host_sum", "apm_profiler_host_top99p", "apm_trace_search_sum", "application_security_fargate_average", "application_security_host_sum", "application_security_host_top99p", "ci_pipeline_indexed_spans_sum", "ci_pipeline_maximum", "ci_pipeline_sum", "ci_test_indexed_spans_sum", "ci_testing_maximum", "ci_testing_sum", "cloud_cost_management_average", "cloud_cost_management_sum", "cspm_container_sum", "cspm_host_sum", "cspm_host_top99p", "custom_event_sum", "cws_container_sum", "cws_host_sum", "cws_host_top99p", "dbm_host_sum", "dbm_host_top99p", "dbm_normalized_queries_average", "dbm_normalized_queries_sum", "fargate_container_apm_and_profiler_average", "fargate_container_apm_and_profiler_sum", "fargate_container_average", "fargate_container_profiler_average", "fargate_container_profiler_sum", "fargate_container_sum", "incident_management_maximum", "incident_management_sum", "infra_and_apm_host_sum", "infra_and_apm_host_top99p", "infra_container_sum", "infra_host_sum", "infra_host_top99p", "ingested_spans_sum", "ingested_timeseries_average", "ingested_timeseries_sum", "iot_sum", "iot_top99p", "lambda_function_average", "lambda_function_sum", "logs_forwarding_sum", "logs_indexed_15day_sum", "logs_indexed_180day_sum", "logs_indexed_1day_sum", "logs_indexed_30day_sum", "logs_indexed_360day_sum", "logs_indexed_3day_sum", "logs_indexed_45day_sum", "logs_indexed_60day_sum", "logs_indexed_7day_sum", "logs_indexed_90day_sum", "logs_indexed_custom_retention_sum", "logs_indexed_sum", "logs_ingested_sum", "network_device_sum", "network_device_top99p", "npm_flow_sum", "npm_host_sum", "npm_host_top99p", "observability_pipeline_sum", "online_archive_sum", "prof_container_sum", "prof_host_sum", "prof_host_top99p", "rum_lite_sum", "rum_replay_sum", "rum_sum", "rum_units_sum", "sensitive_data_scanner_sum", "serverless_apm_sum", "serverless_infra_average", "serverless_infra_sum", "serverless_invocation_sum", "siem_sum", "standard_timeseries_average", "synthetics_api_tests_sum", "synthetics_app_testing_maximum", "synthetics_browser_checks_sum", "timeseries_average", "timeseries_sum"})
} else {
return err
}
@@ -3260,6 +3294,10 @@ func (o *UsageBillableSummaryKeys) UnmarshalJSON(bytes []byte) (err error) {
hasInvalidField = true
}
o.LogsIndexed180daySum = all.LogsIndexed180daySum
+ if all.LogsIndexed1daySum != nil && all.LogsIndexed1daySum.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.LogsIndexed1daySum = all.LogsIndexed1daySum
if all.LogsIndexed30daySum != nil && all.LogsIndexed30daySum.UnparsedObject != nil && o.UnparsedObject == nil {
hasInvalidField = true
}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_apm_retention_filters.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_apm_retention_filters.go
index 5de41d6ee9..6ab0f867b6 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_apm_retention_filters.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_apm_retention_filters.go
@@ -19,11 +19,13 @@ type APMRetentionFiltersApi datadog.Service
// CreateApmRetentionFilter Create a retention filter.
// Create a retention filter to index spans in your organization.
// Returns the retention filter definition when the request is successful.
-func (a *APMRetentionFiltersApi) CreateApmRetentionFilter(ctx _context.Context, body RetentionFilterCreateRequest) (RetentionFilterResponse, *_nethttp.Response, error) {
+//
+// Default filters with types spans-errors-sampling-processor and spans-appsec-sampling-processor cannot be created.
+func (a *APMRetentionFiltersApi) CreateApmRetentionFilter(ctx _context.Context, body RetentionFilterCreateRequest) (RetentionFilterCreateResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodPost
localVarPostBody interface{}
- localVarReturnValue RetentionFilterResponse
+ localVarReturnValue RetentionFilterCreateResponse
)
localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.APMRetentionFiltersApi.CreateApmRetentionFilter")
@@ -92,6 +94,8 @@ func (a *APMRetentionFiltersApi) CreateApmRetentionFilter(ctx _context.Context,
// DeleteApmRetentionFilter Delete a retention filter.
// Delete a specific retention filter from your organization.
+//
+// Default filters with types spans-errors-sampling-processor and spans-appsec-sampling-processor cannot be deleted.
func (a *APMRetentionFiltersApi) DeleteApmRetentionFilter(ctx _context.Context, filterId string) (*_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodDelete
@@ -357,6 +361,8 @@ func (a *APMRetentionFiltersApi) ReorderApmRetentionFilters(ctx _context.Context
// UpdateApmRetentionFilter Update a retention filter.
// Update a retention filter from your organization.
+//
+// Default filters (filters with types spans-errors-sampling-processor and spans-appsec-sampling-processor) cannot be renamed or removed.
func (a *APMRetentionFiltersApi) UpdateApmRetentionFilter(ctx _context.Context, filterId string, body RetentionFilterUpdateRequest) (RetentionFilterResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodPut
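Editor's note: `CreateApmRetentionFilter` now returns `RetentionFilterCreateResponse` instead of `RetentionFilterResponse`, so callers that named the old type must be updated. A sketch under the assumption that the client is built with the library's standard `NewConfiguration`/`NewAPIClient` helpers and the generated `NewAPMRetentionFiltersApi` constructor; names and the import path are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func createRetentionFilter(ctx context.Context, body datadogV2.RetentionFilterCreateRequest) {
	client := datadog.NewAPIClient(datadog.NewConfiguration())
	api := datadogV2.NewAPMRetentionFiltersApi(client)

	// The create call now returns RetentionFilterCreateResponse rather than RetentionFilterResponse.
	resp, _, err := api.CreateApmRetentionFilter(ctx, body)
	if err != nil {
		fmt.Printf("error creating retention filter: %v\n", err)
		return
	}
	fmt.Printf("created retention filter: %+v\n", resp)
}
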
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_case_management.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_case_management.go
index 56c23d21a2..f391ce0f20 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_case_management.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_case_management.go
@@ -586,7 +586,7 @@ func (a *CaseManagementApi) GetProjects(ctx _context.Context) (ProjectsResponse,
// SearchCasesOptionalParameters holds optional parameters for SearchCases.
type SearchCasesOptionalParameters struct {
PageSize *int64
- PageOffset *int64
+ PageNumber *int64
SortField *CaseSortableField
Filter *string
SortAsc *bool
@@ -604,9 +604,9 @@ func (r *SearchCasesOptionalParameters) WithPageSize(pageSize int64) *SearchCase
return r
}
-// WithPageOffset sets the corresponding parameter name and returns the struct.
-func (r *SearchCasesOptionalParameters) WithPageOffset(pageOffset int64) *SearchCasesOptionalParameters {
- r.PageOffset = &pageOffset
+// WithPageNumber sets the corresponding parameter name and returns the struct.
+func (r *SearchCasesOptionalParameters) WithPageNumber(pageNumber int64) *SearchCasesOptionalParameters {
+ r.PageNumber = &pageNumber
return r
}
@@ -658,8 +658,8 @@ func (a *CaseManagementApi) SearchCases(ctx _context.Context, o ...SearchCasesOp
if optionalParams.PageSize != nil {
localVarQueryParams.Add("page[size]", datadog.ParameterToString(*optionalParams.PageSize, ""))
}
- if optionalParams.PageOffset != nil {
- localVarQueryParams.Add("page[offset]", datadog.ParameterToString(*optionalParams.PageOffset, ""))
+ if optionalParams.PageNumber != nil {
+ localVarQueryParams.Add("page[number]", datadog.ParameterToString(*optionalParams.PageNumber, ""))
}
if optionalParams.SortField != nil {
localVarQueryParams.Add("sort[field]", datadog.ParameterToString(*optionalParams.SortField, ""))
@@ -732,6 +732,8 @@ func (a *CaseManagementApi) SearchCasesWithPagination(ctx _context.Context, o ..
pageSize_ = *o[0].PageSize
}
o[0].PageSize = &pageSize_
+ page_ := int64(0)
+ o[0].PageNumber = &page_
items := make(chan datadog.PaginationResult[Case], pageSize_)
go func() {
@@ -759,12 +761,8 @@ func (a *CaseManagementApi) SearchCasesWithPagination(ctx _context.Context, o ..
if len(results) < int(pageSize_) {
break
}
- if o[0].PageOffset == nil {
- o[0].PageOffset = &pageSize_
- } else {
- pageOffset_ := *o[0].PageOffset + pageSize_
- o[0].PageOffset = &pageOffset_
- }
+ pageOffset_ := *o[0].PageNumber + 1
+ o[0].PageNumber = &pageOffset_
}
close(items)
}()
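Editor's note: case search pagination switches from an offset to a page number, so the optional parameter is renamed from `WithPageOffset` to `WithPageNumber` and the query parameter from `page[offset]` to `page[number]`. A minimal sketch of the updated call, assuming an authenticated `ctx` and a constructed `CaseManagementApi`; package and function names are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func searchFirstPageOfCases(ctx context.Context, api *datadogV2.CaseManagementApi) {
	params := datadogV2.SearchCasesOptionalParameters{}
	// WithPageNumber replaces WithPageOffset; the request now sends page[number].
	params.WithPageSize(25).WithPageNumber(0)

	resp, _, err := api.SearchCases(ctx, params)
	if err != nil {
		fmt.Printf("error searching cases: %v\n", err)
		return
	}
	fmt.Printf("cases response: %+v\n", resp)
}
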
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_cloud_workload_security.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_csm_threats.go
similarity index 92%
rename from vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_cloud_workload_security.go
rename to vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_csm_threats.go
index d133609597..ccfba17dff 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_cloud_workload_security.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_csm_threats.go
@@ -14,19 +14,19 @@ import (
"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
)
-// CloudWorkloadSecurityApi service type
-type CloudWorkloadSecurityApi datadog.Service
+// CSMThreatsApi service type
+type CSMThreatsApi datadog.Service
// CreateCSMThreatsAgentRule Create a CSM Threats Agent rule.
// Create a new Cloud Security Management Threats Agent rule with the given parameters.
-func (a *CloudWorkloadSecurityApi) CreateCSMThreatsAgentRule(ctx _context.Context, body CloudWorkloadSecurityAgentRuleCreateRequest) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) CreateCSMThreatsAgentRule(ctx _context.Context, body CloudWorkloadSecurityAgentRuleCreateRequest) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodPost
localVarPostBody interface{}
localVarReturnValue CloudWorkloadSecurityAgentRuleResponse
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.CreateCSMThreatsAgentRule")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.CreateCSMThreatsAgentRule")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -92,14 +92,14 @@ func (a *CloudWorkloadSecurityApi) CreateCSMThreatsAgentRule(ctx _context.Contex
// CreateCloudWorkloadSecurityAgentRule Create a Cloud Workload Security Agent rule.
// Create a new Agent rule with the given parameters.
-func (a *CloudWorkloadSecurityApi) CreateCloudWorkloadSecurityAgentRule(ctx _context.Context, body CloudWorkloadSecurityAgentRuleCreateRequest) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) CreateCloudWorkloadSecurityAgentRule(ctx _context.Context, body CloudWorkloadSecurityAgentRuleCreateRequest) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodPost
localVarPostBody interface{}
localVarReturnValue CloudWorkloadSecurityAgentRuleResponse
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.CreateCloudWorkloadSecurityAgentRule")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.CreateCloudWorkloadSecurityAgentRule")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -165,13 +165,13 @@ func (a *CloudWorkloadSecurityApi) CreateCloudWorkloadSecurityAgentRule(ctx _con
// DeleteCSMThreatsAgentRule Delete a CSM Threats Agent rule.
// Delete a specific Cloud Security Management Threats Agent rule.
-func (a *CloudWorkloadSecurityApi) DeleteCSMThreatsAgentRule(ctx _context.Context, agentRuleId string) (*_nethttp.Response, error) {
+func (a *CSMThreatsApi) DeleteCSMThreatsAgentRule(ctx _context.Context, agentRuleId string) (*_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodDelete
localVarPostBody interface{}
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.DeleteCSMThreatsAgentRule")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.DeleteCSMThreatsAgentRule")
if err != nil {
return nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -226,13 +226,13 @@ func (a *CloudWorkloadSecurityApi) DeleteCSMThreatsAgentRule(ctx _context.Contex
// DeleteCloudWorkloadSecurityAgentRule Delete a Cloud Workload Security Agent rule.
// Delete a specific Agent rule.
-func (a *CloudWorkloadSecurityApi) DeleteCloudWorkloadSecurityAgentRule(ctx _context.Context, agentRuleId string) (*_nethttp.Response, error) {
+func (a *CSMThreatsApi) DeleteCloudWorkloadSecurityAgentRule(ctx _context.Context, agentRuleId string) (*_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodDelete
localVarPostBody interface{}
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.DeleteCloudWorkloadSecurityAgentRule")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.DeleteCloudWorkloadSecurityAgentRule")
if err != nil {
return nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -289,14 +289,14 @@ func (a *CloudWorkloadSecurityApi) DeleteCloudWorkloadSecurityAgentRule(ctx _con
// The download endpoint generates a CSM Threats policy file from your currently active
// CSM Threats rules, and downloads them as a `.policy` file. This file can then be deployed to
// your Agents to update the policy running in your environment.
-func (a *CloudWorkloadSecurityApi) DownloadCSMThreatsPolicy(ctx _context.Context) (_io.Reader, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) DownloadCSMThreatsPolicy(ctx _context.Context) (_io.Reader, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarReturnValue _io.Reader
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.DownloadCSMThreatsPolicy")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.DownloadCSMThreatsPolicy")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -353,14 +353,14 @@ func (a *CloudWorkloadSecurityApi) DownloadCSMThreatsPolicy(ctx _context.Context
// The download endpoint generates a Cloud Workload Security policy file from your currently active
// Cloud Workload Security rules, and downloads them as a .policy file. This file can then be deployed to
// your Agents to update the policy running in your environment.
-func (a *CloudWorkloadSecurityApi) DownloadCloudWorkloadPolicyFile(ctx _context.Context) (_io.Reader, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) DownloadCloudWorkloadPolicyFile(ctx _context.Context) (_io.Reader, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarReturnValue _io.Reader
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.DownloadCloudWorkloadPolicyFile")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.DownloadCloudWorkloadPolicyFile")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -415,14 +415,14 @@ func (a *CloudWorkloadSecurityApi) DownloadCloudWorkloadPolicyFile(ctx _context.
// GetCSMThreatsAgentRule Get a CSM Threats Agent rule.
// Get the details of a specific Cloud Security Management Threats Agent rule.
-func (a *CloudWorkloadSecurityApi) GetCSMThreatsAgentRule(ctx _context.Context, agentRuleId string) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) GetCSMThreatsAgentRule(ctx _context.Context, agentRuleId string) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarReturnValue CloudWorkloadSecurityAgentRuleResponse
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.GetCSMThreatsAgentRule")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.GetCSMThreatsAgentRule")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -486,14 +486,14 @@ func (a *CloudWorkloadSecurityApi) GetCSMThreatsAgentRule(ctx _context.Context,
// GetCloudWorkloadSecurityAgentRule Get a Cloud Workload Security Agent rule.
// Get the details of a specific Agent rule.
-func (a *CloudWorkloadSecurityApi) GetCloudWorkloadSecurityAgentRule(ctx _context.Context, agentRuleId string) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) GetCloudWorkloadSecurityAgentRule(ctx _context.Context, agentRuleId string) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarReturnValue CloudWorkloadSecurityAgentRuleResponse
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.GetCloudWorkloadSecurityAgentRule")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.GetCloudWorkloadSecurityAgentRule")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -557,14 +557,14 @@ func (a *CloudWorkloadSecurityApi) GetCloudWorkloadSecurityAgentRule(ctx _contex
// ListCSMThreatsAgentRules Get all CSM Threats Agent rules.
// Get the list of Cloud Security Management Threats Agent rules.
-func (a *CloudWorkloadSecurityApi) ListCSMThreatsAgentRules(ctx _context.Context) (CloudWorkloadSecurityAgentRulesListResponse, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) ListCSMThreatsAgentRules(ctx _context.Context) (CloudWorkloadSecurityAgentRulesListResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarReturnValue CloudWorkloadSecurityAgentRulesListResponse
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.ListCSMThreatsAgentRules")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.ListCSMThreatsAgentRules")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -627,14 +627,14 @@ func (a *CloudWorkloadSecurityApi) ListCSMThreatsAgentRules(ctx _context.Context
// ListCloudWorkloadSecurityAgentRules Get all Cloud Workload Security Agent rules.
// Get the list of Agent rules.
-func (a *CloudWorkloadSecurityApi) ListCloudWorkloadSecurityAgentRules(ctx _context.Context) (CloudWorkloadSecurityAgentRulesListResponse, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) ListCloudWorkloadSecurityAgentRules(ctx _context.Context) (CloudWorkloadSecurityAgentRulesListResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
localVarPostBody interface{}
localVarReturnValue CloudWorkloadSecurityAgentRulesListResponse
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.ListCloudWorkloadSecurityAgentRules")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.ListCloudWorkloadSecurityAgentRules")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -698,14 +698,14 @@ func (a *CloudWorkloadSecurityApi) ListCloudWorkloadSecurityAgentRules(ctx _cont
// UpdateCSMThreatsAgentRule Update a CSM Threats Agent rule.
// Update a specific Cloud Security Management Threats Agent rule.
// Returns the Agent rule object when the request is successful.
-func (a *CloudWorkloadSecurityApi) UpdateCSMThreatsAgentRule(ctx _context.Context, agentRuleId string, body CloudWorkloadSecurityAgentRuleUpdateRequest) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) UpdateCSMThreatsAgentRule(ctx _context.Context, agentRuleId string, body CloudWorkloadSecurityAgentRuleUpdateRequest) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodPatch
localVarPostBody interface{}
localVarReturnValue CloudWorkloadSecurityAgentRuleResponse
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.UpdateCSMThreatsAgentRule")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.UpdateCSMThreatsAgentRule")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -773,14 +773,14 @@ func (a *CloudWorkloadSecurityApi) UpdateCSMThreatsAgentRule(ctx _context.Contex
// UpdateCloudWorkloadSecurityAgentRule Update a Cloud Workload Security Agent rule.
// Update a specific Agent rule.
// Returns the Agent rule object when the request is successful.
-func (a *CloudWorkloadSecurityApi) UpdateCloudWorkloadSecurityAgentRule(ctx _context.Context, agentRuleId string, body CloudWorkloadSecurityAgentRuleUpdateRequest) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
+func (a *CSMThreatsApi) UpdateCloudWorkloadSecurityAgentRule(ctx _context.Context, agentRuleId string, body CloudWorkloadSecurityAgentRuleUpdateRequest) (CloudWorkloadSecurityAgentRuleResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodPatch
localVarPostBody interface{}
localVarReturnValue CloudWorkloadSecurityAgentRuleResponse
)
- localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CloudWorkloadSecurityApi.UpdateCloudWorkloadSecurityAgentRule")
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.CSMThreatsApi.UpdateCloudWorkloadSecurityAgentRule")
if err != nil {
return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
}
@@ -845,9 +845,9 @@ func (a *CloudWorkloadSecurityApi) UpdateCloudWorkloadSecurityAgentRule(ctx _con
return localVarReturnValue, localVarHTTPResponse, nil
}
-// NewCloudWorkloadSecurityApi Returns NewCloudWorkloadSecurityApi.
-func NewCloudWorkloadSecurityApi(client *datadog.APIClient) *CloudWorkloadSecurityApi {
- return &CloudWorkloadSecurityApi{
+// NewCSMThreatsApi Returns NewCSMThreatsApi.
+func NewCSMThreatsApi(client *datadog.APIClient) *CSMThreatsApi {
+ return &CSMThreatsApi{
Client: client,
}
}
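Editor's note: the service type and constructor are renamed from `CloudWorkloadSecurityApi`/`NewCloudWorkloadSecurityApi` to `CSMThreatsApi`/`NewCSMThreatsApi`, while the operation names themselves are unchanged. A sketch of constructing the renamed service, assuming an existing `*datadog.APIClient`; names and the import path are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func listThreatsAgentRules(ctx context.Context, client *datadog.APIClient) {
	// NewCSMThreatsApi replaces NewCloudWorkloadSecurityApi; operations keep their existing names.
	api := datadogV2.NewCSMThreatsApi(client)

	resp, _, err := api.ListCSMThreatsAgentRules(ctx)
	if err != nil {
		fmt.Printf("error listing CSM Threats Agent rules: %v\n", err)
		return
	}
	fmt.Printf("agent rules: %+v\n", resp)
}
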
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_metrics.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_metrics.go
index 83ab9016ef..8420e5707c 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_metrics.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_metrics.go
@@ -470,7 +470,7 @@ func (r *ListActiveMetricConfigurationsOptionalParameters) WithWindowSeconds(win
}
// ListActiveMetricConfigurations List active tags and aggregations.
-// List tags and aggregations that are actively queried on dashboards and monitors for a given metric name.
+// List tags and aggregations that are actively queried on dashboards, notebooks, monitors, and the Metrics Explorer for a given metric name.
func (a *MetricsApi) ListActiveMetricConfigurations(ctx _context.Context, metricName string, o ...ListActiveMetricConfigurationsOptionalParameters) (MetricSuggestedTagsAndAggregationsResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
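Editor's note: only the doc comment changes here, broadening where "actively queried" is measured. For reference, a minimal sketch of the call whose documentation was updated; the metric name is a placeholder and the surrounding setup is omitted.

package example

import (
	"context"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func listActiveMetricConfigurations(ctx context.Context, api *datadogV2.MetricsApi) {
	// "system.cpu.user" is a placeholder metric name; optional window parameters are omitted.
	resp, _, err := api.ListActiveMetricConfigurations(ctx, "system.cpu.user")
	if err != nil {
		fmt.Printf("error listing active configurations: %v\n", err)
		return
	}
	fmt.Printf("actively queried tags and aggregations: %+v\n", resp)
}
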
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_restriction_policies.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_restriction_policies.go
index 8898597d5c..d6b5897a05 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_restriction_policies.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_restriction_policies.go
@@ -153,18 +153,18 @@ func (a *RestrictionPoliciesApi) GetRestrictionPolicy(ctx _context.Context, reso
//
// #### Supported resources
// Restriction policies can be applied to the following resources:
-// - Connections: `connection`
// - Dashboards: `dashboard`
// - Notebooks: `notebook`
+// - Powerpacks: `powerpack`
// - Security Rules: `security-rule`
// - Service Level Objectives: `slo`
//
// #### Supported relations for resources
// Resource Type | Supported Relations
// -------------------------|--------------------------
-// Connections | `viewer`, `editor`, `resolver`
// Dashboards | `viewer`, `editor`
// Notebooks | `viewer`, `editor`
+// Powerpacks | `viewer`, `editor`
// Security Rules | `viewer`, `editor`
// Service Level Objectives | `viewer`, `editor`
func (a *RestrictionPoliciesApi) UpdateRestrictionPolicy(ctx _context.Context, resourceId string, body RestrictionPolicyUpdateRequest) (RestrictionPolicyResponse, *_nethttp.Response, error) {
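Editor's note: the supported-resources list now includes Powerpacks and drops Connections. A sketch of targeting a powerpack with `UpdateRestrictionPolicy`, assuming the documented `"<type>:<id>"` resource ID convention; the ID is a placeholder and the request body is taken as given.

package example

import (
	"context"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func updatePowerpackPolicy(ctx context.Context, api *datadogV2.RestrictionPoliciesApi, body datadogV2.RestrictionPolicyUpdateRequest) {
	// The resource ID below is a placeholder in the assumed "<type>:<id>" form.
	resp, _, err := api.UpdateRestrictionPolicy(ctx, "powerpack:00000000-0000-0000-0000-000000000000", body)
	if err != nil {
		fmt.Printf("error updating restriction policy: %v\n", err)
		return
	}
	fmt.Printf("restriction policy: %+v\n", resp)
}
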
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_security_monitoring.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_security_monitoring.go
index 62da317225..42d66b2308 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_security_monitoring.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_security_monitoring.go
@@ -2121,7 +2121,8 @@ func (a *SecurityMonitoringApi) UpdateSecurityFilter(ctx _context.Context, secur
// UpdateSecurityMonitoringRule Update an existing rule.
// Update an existing rule. When updating `cases`, `queries` or `options`, the whole field
// must be included. For example, when modifying a query all queries must be included.
-// Default rules can only be updated to be enabled and to change notifications.
+// Default rules can only be updated to be enabled, to change notifications, or to update
+// the tags (default tags cannot be removed).
func (a *SecurityMonitoringApi) UpdateSecurityMonitoringRule(ctx _context.Context, ruleId string, body SecurityMonitoringRuleUpdatePayload) (SecurityMonitoringRuleResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodPut
@@ -2268,6 +2269,69 @@ func (a *SecurityMonitoringApi) UpdateSecurityMonitoringSuppression(ctx _context
return localVarReturnValue, localVarHTTPResponse, nil
}
+// ValidateSecurityMonitoringRule Validate a detection rule.
+// Validate a detection rule.
+func (a *SecurityMonitoringApi) ValidateSecurityMonitoringRule(ctx _context.Context, body SecurityMonitoringRuleCreatePayload) (*_nethttp.Response, error) {
+ var (
+ localVarHTTPMethod = _nethttp.MethodPost
+ localVarPostBody interface{}
+ )
+
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.SecurityMonitoringApi.ValidateSecurityMonitoringRule")
+ if err != nil {
+ return nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
+ }
+
+ localVarPath := localBasePath + "/api/v2/security_monitoring/rules/validation"
+
+ localVarHeaderParams := make(map[string]string)
+ localVarQueryParams := _neturl.Values{}
+ localVarFormParams := _neturl.Values{}
+ localVarHeaderParams["Content-Type"] = "application/json"
+ localVarHeaderParams["Accept"] = "*/*"
+
+ // body params
+ localVarPostBody = &body
+ datadog.SetAuthKeys(
+ ctx,
+ &localVarHeaderParams,
+ [2]string{"apiKeyAuth", "DD-API-KEY"},
+ [2]string{"appKeyAuth", "DD-APPLICATION-KEY"},
+ )
+ req, err := a.Client.PrepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ localVarHTTPResponse, err := a.Client.CallAPI(req)
+ if err != nil || localVarHTTPResponse == nil {
+ return localVarHTTPResponse, err
+ }
+
+ localVarBody, err := datadog.ReadBody(localVarHTTPResponse)
+ if err != nil {
+ return localVarHTTPResponse, err
+ }
+
+ if localVarHTTPResponse.StatusCode >= 300 {
+ newErr := datadog.GenericOpenAPIError{
+ ErrorBody: localVarBody,
+ ErrorMessage: localVarHTTPResponse.Status,
+ }
+ if localVarHTTPResponse.StatusCode == 400 || localVarHTTPResponse.StatusCode == 403 || localVarHTTPResponse.StatusCode == 429 {
+ var v APIErrorResponse
+ err = a.Client.Decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
+ if err != nil {
+ return localVarHTTPResponse, newErr
+ }
+ newErr.ErrorModel = v
+ }
+ return localVarHTTPResponse, newErr
+ }
+
+ return localVarHTTPResponse, nil
+}
+
// NewSecurityMonitoringApi Returns NewSecurityMonitoringApi.
func NewSecurityMonitoringApi(client *datadog.APIClient) *SecurityMonitoringApi {
return &SecurityMonitoringApi{
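Editor's note: the new `ValidateSecurityMonitoringRule` operation returns only an HTTP response and an error, with no decoded body. A sketch of wrapping it, assuming an existing `SecurityMonitoringApi` and a prepared rule payload; names are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func validateRule(ctx context.Context, api *datadogV2.SecurityMonitoringApi, rule datadogV2.SecurityMonitoringRuleCreatePayload) error {
	// A non-nil error means the rule failed validation or the request itself failed.
	httpResp, err := api.ValidateSecurityMonitoringRule(ctx, rule)
	if err != nil {
		if httpResp != nil {
			return fmt.Errorf("rule validation failed (HTTP %d): %w", httpResp.StatusCode, err)
		}
		return err
	}
	return nil
}
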
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_service_level_objectives.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_service_level_objectives.go
new file mode 100644
index 0000000000..03e69f7917
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_service_level_objectives.go
@@ -0,0 +1,266 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ _context "context"
+ _fmt "fmt"
+ _log "log"
+ _nethttp "net/http"
+ _neturl "net/url"
+ "strings"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// ServiceLevelObjectivesApi service type
+type ServiceLevelObjectivesApi datadog.Service
+
+// CreateSLOReportJob Create a new SLO report.
+// Create a job to generate an SLO report. The report job is processed asynchronously and eventually results in a CSV report being available for download.
+//
+// Check the status of the job and download the CSV report using the returned `report_id`.
+func (a *ServiceLevelObjectivesApi) CreateSLOReportJob(ctx _context.Context, body SloReportCreateRequest) (SLOReportPostResponse, *_nethttp.Response, error) {
+ var (
+ localVarHTTPMethod = _nethttp.MethodPost
+ localVarPostBody interface{}
+ localVarReturnValue SLOReportPostResponse
+ )
+
+ operationId := "v2.CreateSLOReportJob"
+ if a.Client.Cfg.IsUnstableOperationEnabled(operationId) {
+ _log.Printf("WARNING: Using unstable operation '%s'", operationId)
+ } else {
+ return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: _fmt.Sprintf("Unstable operation '%s' is disabled", operationId)}
+ }
+
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.ServiceLevelObjectivesApi.CreateSLOReportJob")
+ if err != nil {
+ return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
+ }
+
+ localVarPath := localBasePath + "/api/v2/slo/report"
+
+ localVarHeaderParams := make(map[string]string)
+ localVarQueryParams := _neturl.Values{}
+ localVarFormParams := _neturl.Values{}
+ localVarHeaderParams["Content-Type"] = "application/json"
+ localVarHeaderParams["Accept"] = "application/json"
+
+ // body params
+ localVarPostBody = &body
+ datadog.SetAuthKeys(
+ ctx,
+ &localVarHeaderParams,
+ [2]string{"apiKeyAuth", "DD-API-KEY"},
+ [2]string{"appKeyAuth", "DD-APPLICATION-KEY"},
+ )
+ req, err := a.Client.PrepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, nil)
+ if err != nil {
+ return localVarReturnValue, nil, err
+ }
+
+ localVarHTTPResponse, err := a.Client.CallAPI(req)
+ if err != nil || localVarHTTPResponse == nil {
+ return localVarReturnValue, localVarHTTPResponse, err
+ }
+
+ localVarBody, err := datadog.ReadBody(localVarHTTPResponse)
+ if err != nil {
+ return localVarReturnValue, localVarHTTPResponse, err
+ }
+
+ if localVarHTTPResponse.StatusCode >= 300 {
+ newErr := datadog.GenericOpenAPIError{
+ ErrorBody: localVarBody,
+ ErrorMessage: localVarHTTPResponse.Status,
+ }
+ if localVarHTTPResponse.StatusCode == 400 || localVarHTTPResponse.StatusCode == 403 || localVarHTTPResponse.StatusCode == 429 {
+ var v APIErrorResponse
+ err = a.Client.Decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
+ if err != nil {
+ return localVarReturnValue, localVarHTTPResponse, newErr
+ }
+ newErr.ErrorModel = v
+ }
+ return localVarReturnValue, localVarHTTPResponse, newErr
+ }
+
+ err = a.Client.Decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
+ if err != nil {
+ newErr := datadog.GenericOpenAPIError{
+ ErrorBody: localVarBody,
+ ErrorMessage: err.Error(),
+ }
+ return localVarReturnValue, localVarHTTPResponse, newErr
+ }
+
+ return localVarReturnValue, localVarHTTPResponse, nil
+}
+
+// GetSLOReport Get SLO report.
+// Download an SLO report. This can only be performed after the report job has completed.
+//
+// Reports are not guaranteed to exist indefinitely. Datadog recommends that you download the report as soon as it is available.
+func (a *ServiceLevelObjectivesApi) GetSLOReport(ctx _context.Context, reportId string) (string, *_nethttp.Response, error) {
+ var (
+ localVarHTTPMethod = _nethttp.MethodGet
+ localVarPostBody interface{}
+ localVarReturnValue string
+ )
+
+ operationId := "v2.GetSLOReport"
+ if a.Client.Cfg.IsUnstableOperationEnabled(operationId) {
+ _log.Printf("WARNING: Using unstable operation '%s'", operationId)
+ } else {
+ return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: _fmt.Sprintf("Unstable operation '%s' is disabled", operationId)}
+ }
+
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.ServiceLevelObjectivesApi.GetSLOReport")
+ if err != nil {
+ return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
+ }
+
+ localVarPath := localBasePath + "/api/v2/slo/report/{report_id}/download"
+ localVarPath = strings.Replace(localVarPath, "{"+"report_id"+"}", _neturl.PathEscape(datadog.ParameterToString(reportId, "")), -1)
+
+ localVarHeaderParams := make(map[string]string)
+ localVarQueryParams := _neturl.Values{}
+ localVarFormParams := _neturl.Values{}
+ localVarHeaderParams["Accept"] = "application/json"
+
+ datadog.SetAuthKeys(
+ ctx,
+ &localVarHeaderParams,
+ [2]string{"apiKeyAuth", "DD-API-KEY"},
+ [2]string{"appKeyAuth", "DD-APPLICATION-KEY"},
+ )
+ req, err := a.Client.PrepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, nil)
+ if err != nil {
+ return localVarReturnValue, nil, err
+ }
+
+ localVarHTTPResponse, err := a.Client.CallAPI(req)
+ if err != nil || localVarHTTPResponse == nil {
+ return localVarReturnValue, localVarHTTPResponse, err
+ }
+
+ localVarBody, err := datadog.ReadBody(localVarHTTPResponse)
+ if err != nil {
+ return localVarReturnValue, localVarHTTPResponse, err
+ }
+
+ if localVarHTTPResponse.StatusCode >= 300 {
+ newErr := datadog.GenericOpenAPIError{
+ ErrorBody: localVarBody,
+ ErrorMessage: localVarHTTPResponse.Status,
+ }
+ if localVarHTTPResponse.StatusCode == 400 || localVarHTTPResponse.StatusCode == 403 || localVarHTTPResponse.StatusCode == 404 || localVarHTTPResponse.StatusCode == 429 {
+ var v APIErrorResponse
+ err = a.Client.Decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
+ if err != nil {
+ return localVarReturnValue, localVarHTTPResponse, newErr
+ }
+ newErr.ErrorModel = v
+ }
+ return localVarReturnValue, localVarHTTPResponse, newErr
+ }
+
+ err = a.Client.Decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
+ if err != nil {
+ newErr := datadog.GenericOpenAPIError{
+ ErrorBody: localVarBody,
+ ErrorMessage: err.Error(),
+ }
+ return localVarReturnValue, localVarHTTPResponse, newErr
+ }
+
+ return localVarReturnValue, localVarHTTPResponse, nil
+}
+
+// GetSLOReportJobStatus Get SLO report status.
+// Get the status of the SLO report job.
+func (a *ServiceLevelObjectivesApi) GetSLOReportJobStatus(ctx _context.Context, reportId string) (SLOReportStatusGetResponse, *_nethttp.Response, error) {
+ var (
+ localVarHTTPMethod = _nethttp.MethodGet
+ localVarPostBody interface{}
+ localVarReturnValue SLOReportStatusGetResponse
+ )
+
+ operationId := "v2.GetSLOReportJobStatus"
+ if a.Client.Cfg.IsUnstableOperationEnabled(operationId) {
+ _log.Printf("WARNING: Using unstable operation '%s'", operationId)
+ } else {
+ return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: _fmt.Sprintf("Unstable operation '%s' is disabled", operationId)}
+ }
+
+ localBasePath, err := a.Client.Cfg.ServerURLWithContext(ctx, "v2.ServiceLevelObjectivesApi.GetSLOReportJobStatus")
+ if err != nil {
+ return localVarReturnValue, nil, datadog.GenericOpenAPIError{ErrorMessage: err.Error()}
+ }
+
+ localVarPath := localBasePath + "/api/v2/slo/report/{report_id}/status"
+ localVarPath = strings.Replace(localVarPath, "{"+"report_id"+"}", _neturl.PathEscape(datadog.ParameterToString(reportId, "")), -1)
+
+ localVarHeaderParams := make(map[string]string)
+ localVarQueryParams := _neturl.Values{}
+ localVarFormParams := _neturl.Values{}
+ localVarHeaderParams["Accept"] = "application/json"
+
+ datadog.SetAuthKeys(
+ ctx,
+ &localVarHeaderParams,
+ [2]string{"apiKeyAuth", "DD-API-KEY"},
+ [2]string{"appKeyAuth", "DD-APPLICATION-KEY"},
+ )
+ req, err := a.Client.PrepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, nil)
+ if err != nil {
+ return localVarReturnValue, nil, err
+ }
+
+ localVarHTTPResponse, err := a.Client.CallAPI(req)
+ if err != nil || localVarHTTPResponse == nil {
+ return localVarReturnValue, localVarHTTPResponse, err
+ }
+
+ localVarBody, err := datadog.ReadBody(localVarHTTPResponse)
+ if err != nil {
+ return localVarReturnValue, localVarHTTPResponse, err
+ }
+
+ if localVarHTTPResponse.StatusCode >= 300 {
+ newErr := datadog.GenericOpenAPIError{
+ ErrorBody: localVarBody,
+ ErrorMessage: localVarHTTPResponse.Status,
+ }
+ if localVarHTTPResponse.StatusCode == 400 || localVarHTTPResponse.StatusCode == 403 || localVarHTTPResponse.StatusCode == 404 || localVarHTTPResponse.StatusCode == 429 {
+ var v APIErrorResponse
+ err = a.Client.Decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
+ if err != nil {
+ return localVarReturnValue, localVarHTTPResponse, newErr
+ }
+ newErr.ErrorModel = v
+ }
+ return localVarReturnValue, localVarHTTPResponse, newErr
+ }
+
+ err = a.Client.Decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
+ if err != nil {
+ newErr := datadog.GenericOpenAPIError{
+ ErrorBody: localVarBody,
+ ErrorMessage: err.Error(),
+ }
+ return localVarReturnValue, localVarHTTPResponse, newErr
+ }
+
+ return localVarReturnValue, localVarHTTPResponse, nil
+}
+
+// NewServiceLevelObjectivesApi Returns NewServiceLevelObjectivesApi.
+func NewServiceLevelObjectivesApi(client *datadog.APIClient) *ServiceLevelObjectivesApi {
+ return &ServiceLevelObjectivesApi{
+ Client: client,
+ }
+}
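A sketch of the report workflow the new ServiceLevelObjectivesApi enables: create the job, check its status, then download the CSV. All three operations are gated as unstable above, so they must be enabled on the configuration first; extractReportID is a hypothetical placeholder because the accessor path on SLOReportPostResponse is not shown in this diff:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

// extractReportID is a hypothetical helper: the exact field path on
// SLOReportPostResponse is not part of this diff.
func extractReportID(resp datadogV2.SLOReportPostResponse) string {
	_ = resp
	return "<report_id>"
}

func main() {
	ctx := datadog.NewDefaultContext(context.Background())

	configuration := datadog.NewConfiguration()
	// The report operations are unstable and disabled by default.
	configuration.SetUnstableOperationEnabled("v2.CreateSLOReportJob", true)
	configuration.SetUnstableOperationEnabled("v2.GetSLOReportJobStatus", true)
	configuration.SetUnstableOperationEnabled("v2.GetSLOReport", true)

	api := datadogV2.NewServiceLevelObjectivesApi(datadog.NewAPIClient(configuration))

	// Assume the request body (SLO query, time frame, etc.) is built elsewhere.
	var request datadogV2.SloReportCreateRequest

	// Kick off the asynchronous report job.
	created, _, err := api.CreateSLOReportJob(ctx, request)
	if err != nil {
		log.Fatalf("creating report job: %v", err)
	}
	reportID := extractReportID(created)

	// Poll the job status, then download the CSV once it completes.
	status, _, err := api.GetSLOReportJobStatus(ctx, reportID)
	if err != nil {
		log.Fatalf("fetching job status: %v", err)
	}
	fmt.Printf("job status: %+v\n", status)

	csvReport, _, err := api.GetSLOReport(ctx, reportID)
	if err != nil {
		log.Fatalf("downloading report: %v", err)
	}
	fmt.Println(csvReport)
}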
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_usage_metering.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_usage_metering.go
index 15f5935223..43fb6f5ba6 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_usage_metering.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/api_usage_metering.go
@@ -119,6 +119,8 @@ func (r *GetCostByOrgOptionalParameters) WithEndMonth(endMonth time.Time) *GetCo
// [`/historical_cost`](https://docs.datadoghq.com/api/latest/usage-metering/#get-historical-cost-across-your-account)
// instead.
//
+// This endpoint is only accessible for [parent-level organizations](https://docs.datadoghq.com/account_management/multi_organization/).
+//
// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetCostByOrg(ctx _context.Context, startMonth time.Time, o ...GetCostByOrgOptionalParameters) (CostByOrgResponse, *_nethttp.Response, error) {
var (
@@ -250,6 +252,8 @@ func (r *GetEstimatedCostByOrgOptionalParameters) WithEndDate(endDate time.Time)
// Estimated cost data is only available for the current month and previous month
// and is delayed by up to 72 hours from when it was incurred.
// To access historical costs prior to this, use the `/historical_cost` endpoint.
+//
+// This endpoint is only accessible for [parent-level organizations](https://docs.datadoghq.com/account_management/multi_organization/).
func (a *UsageMeteringApi) GetEstimatedCostByOrg(ctx _context.Context, o ...GetEstimatedCostByOrgOptionalParameters) (CostByOrgResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -368,6 +372,8 @@ func (r *GetHistoricalCostByOrgOptionalParameters) WithEndMonth(endMonth time.Ti
// GetHistoricalCostByOrg Get historical cost across your account.
// Get historical cost across multi-org and single root-org accounts.
// Cost data for a given month becomes available no later than the 16th of the following month.
+//
+// This endpoint is only accessible for [parent-level organizations](https://docs.datadoghq.com/account_management/multi_organization/).
func (a *UsageMeteringApi) GetHistoricalCostByOrg(ctx _context.Context, startMonth time.Time, o ...GetHistoricalCostByOrgOptionalParameters) (CostByOrgResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -663,6 +669,8 @@ func (r *GetMonthlyCostAttributionOptionalParameters) WithIncludeDescendants(inc
//
// END
// ```
+//
+// This endpoint is only accessible for [parent-level organizations](https://docs.datadoghq.com/account_management/multi_organization/).
func (a *UsageMeteringApi) GetMonthlyCostAttribution(ctx _context.Context, startMonth time.Time, endMonth time.Time, fields string, o ...GetMonthlyCostAttributionOptionalParameters) (MonthlyCostAttributionResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -784,7 +792,8 @@ func (r *GetProjectedCostOptionalParameters) WithView(view string) *GetProjected
// GetProjectedCost Get projected cost across your account.
// Get projected cost across multi-org and single root-org accounts.
// Projected cost data is only available for the current month and becomes available around the 12th of the month.
-// This endpoint requires the usage_read authorization scope.
+//
+// This endpoint is only accessible for [parent-level organizations](https://docs.datadoghq.com/account_management/multi_organization/).
func (a *UsageMeteringApi) GetProjectedCost(ctx _context.Context, o ...GetProjectedCostOptionalParameters) (ProjectedCostResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -883,7 +892,9 @@ func (r *GetUsageApplicationSecurityMonitoringOptionalParameters) WithEndHr(endH
// GetUsageApplicationSecurityMonitoring Get hourly usage for application security.
// Get hourly usage for application security.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family)
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family)
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageApplicationSecurityMonitoring(ctx _context.Context, startHr time.Time, o ...GetUsageApplicationSecurityMonitoringOptionalParameters) (UsageApplicationSecurityMonitoringResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -983,7 +994,9 @@ func (r *GetUsageLambdaTracedInvocationsOptionalParameters) WithEndHr(endHr time
// GetUsageLambdaTracedInvocations Get hourly usage for Lambda traced invocations.
// Get hourly usage for Lambda traced invocations.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family)
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family)
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageLambdaTracedInvocations(ctx _context.Context, startHr time.Time, o ...GetUsageLambdaTracedInvocationsOptionalParameters) (UsageLambdaTracedInvocationsResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
@@ -1083,7 +1096,9 @@ func (r *GetUsageObservabilityPipelinesOptionalParameters) WithEndHr(endHr time.
// GetUsageObservabilityPipelines Get hourly usage for observability pipelines.
// Get hourly usage for observability pipelines.
-// **Note:** hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family)
+// **Note:** This endpoint has been deprecated. Hourly usage data for all products is now available in the [Get hourly usage by product family API](https://docs.datadoghq.com/api/latest/usage-metering/#get-hourly-usage-by-product-family)
+//
+// Deprecated: This API is deprecated.
func (a *UsageMeteringApi) GetUsageObservabilityPipelines(ctx _context.Context, startHr time.Time, o ...GetUsageObservabilityPipelinesOptionalParameters) (UsageObservabilityPipelinesResponse, *_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodGet
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/doc.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/doc.go
index 84f0f1d129..019df1e14a 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/doc.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/doc.go
@@ -27,6 +27,18 @@
// - [CIVisibilityTestsApi.AggregateCIAppTestEvents]
// - [CIVisibilityTestsApi.ListCIAppTestEvents]
// - [CIVisibilityTestsApi.SearchCIAppTestEvents]
+// - [CSMThreatsApi.CreateCSMThreatsAgentRule]
+// - [CSMThreatsApi.CreateCloudWorkloadSecurityAgentRule]
+// - [CSMThreatsApi.DeleteCSMThreatsAgentRule]
+// - [CSMThreatsApi.DeleteCloudWorkloadSecurityAgentRule]
+// - [CSMThreatsApi.DownloadCSMThreatsPolicy]
+// - [CSMThreatsApi.DownloadCloudWorkloadPolicyFile]
+// - [CSMThreatsApi.GetCSMThreatsAgentRule]
+// - [CSMThreatsApi.GetCloudWorkloadSecurityAgentRule]
+// - [CSMThreatsApi.ListCSMThreatsAgentRules]
+// - [CSMThreatsApi.ListCloudWorkloadSecurityAgentRules]
+// - [CSMThreatsApi.UpdateCSMThreatsAgentRule]
+// - [CSMThreatsApi.UpdateCloudWorkloadSecurityAgentRule]
// - [CaseManagementApi.ArchiveCase]
// - [CaseManagementApi.AssignCase]
// - [CaseManagementApi.CreateCase]
@@ -50,18 +62,6 @@
// - [CloudCostManagementApi.ListCostAzureUCConfigs]
// - [CloudCostManagementApi.UpdateCostAWSCURConfig]
// - [CloudCostManagementApi.UpdateCostAzureUCConfigs]
-// - [CloudWorkloadSecurityApi.CreateCSMThreatsAgentRule]
-// - [CloudWorkloadSecurityApi.CreateCloudWorkloadSecurityAgentRule]
-// - [CloudWorkloadSecurityApi.DeleteCSMThreatsAgentRule]
-// - [CloudWorkloadSecurityApi.DeleteCloudWorkloadSecurityAgentRule]
-// - [CloudWorkloadSecurityApi.DownloadCSMThreatsPolicy]
-// - [CloudWorkloadSecurityApi.DownloadCloudWorkloadPolicyFile]
-// - [CloudWorkloadSecurityApi.GetCSMThreatsAgentRule]
-// - [CloudWorkloadSecurityApi.GetCloudWorkloadSecurityAgentRule]
-// - [CloudWorkloadSecurityApi.ListCSMThreatsAgentRules]
-// - [CloudWorkloadSecurityApi.ListCloudWorkloadSecurityAgentRules]
-// - [CloudWorkloadSecurityApi.UpdateCSMThreatsAgentRule]
-// - [CloudWorkloadSecurityApi.UpdateCloudWorkloadSecurityAgentRule]
// - [CloudflareIntegrationApi.CreateCloudflareAccount]
// - [CloudflareIntegrationApi.DeleteCloudflareAccount]
// - [CloudflareIntegrationApi.GetCloudflareAccount]
@@ -262,6 +262,7 @@
// - [SecurityMonitoringApi.UpdateSecurityFilter]
// - [SecurityMonitoringApi.UpdateSecurityMonitoringRule]
// - [SecurityMonitoringApi.UpdateSecurityMonitoringSuppression]
+// - [SecurityMonitoringApi.ValidateSecurityMonitoringRule]
// - [SensitiveDataScannerApi.CreateScanningGroup]
// - [SensitiveDataScannerApi.CreateScanningRule]
// - [SensitiveDataScannerApi.DeleteScanningGroup]
@@ -281,6 +282,9 @@
// - [ServiceDefinitionApi.DeleteServiceDefinition]
// - [ServiceDefinitionApi.GetServiceDefinition]
// - [ServiceDefinitionApi.ListServiceDefinitions]
+// - [ServiceLevelObjectivesApi.CreateSLOReportJob]
+// - [ServiceLevelObjectivesApi.GetSLOReport]
+// - [ServiceLevelObjectivesApi.GetSLOReportJobStatus]
// - [ServiceScorecardsApi.CreateScorecardOutcomesBatch]
// - [ServiceScorecardsApi.CreateScorecardRule]
// - [ServiceScorecardsApi.DeleteScorecardRule]
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_create_data.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_create_data.go
index df34000e01..d1d83f0f93 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_create_data.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_create_data.go
@@ -14,7 +14,7 @@ import (
type AuthNMappingCreateData struct {
// Key/Value pair of attributes used for create request.
Attributes *AuthNMappingCreateAttributes `json:"attributes,omitempty"`
- // Relationship of AuthN Mapping create object to Role.
+ // Relationship of AuthN Mapping create object to a Role or Team.
Relationships *AuthNMappingCreateRelationships `json:"relationships,omitempty"`
// AuthN Mappings resource type.
Type AuthNMappingsType `json:"type"`
@@ -167,9 +167,6 @@ func (o *AuthNMappingCreateData) UnmarshalJSON(bytes []byte) (err error) {
hasInvalidField = true
}
o.Attributes = all.Attributes
- if all.Relationships != nil && all.Relationships.UnparsedObject != nil && o.UnparsedObject == nil {
- hasInvalidField = true
- }
o.Relationships = all.Relationships
if !all.Type.IsValid() {
hasInvalidField = true
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_create_relationships.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_create_relationships.go
index 8dee6bcfc9..c5d48e81d2 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_create_relationships.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_create_relationships.go
@@ -8,104 +8,98 @@ import (
"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
)
-// AuthNMappingCreateRelationships Relationship of AuthN Mapping create object to Role.
+// AuthNMappingCreateRelationships - Relationship of AuthN Mapping create object to a Role or Team.
type AuthNMappingCreateRelationships struct {
- // Relationship to role.
- Role *RelationshipToRole `json:"role,omitempty"`
- // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
- UnparsedObject map[string]interface{} `json:"-"`
- AdditionalProperties map[string]interface{}
-}
+ AuthNMappingRelationshipToRole *AuthNMappingRelationshipToRole
+ AuthNMappingRelationshipToTeam *AuthNMappingRelationshipToTeam
-// NewAuthNMappingCreateRelationships instantiates a new AuthNMappingCreateRelationships object.
-// This constructor will assign default values to properties that have it defined,
-// and makes sure properties required by API are set, but the set of arguments
-// will change when the set of required properties is changed.
-func NewAuthNMappingCreateRelationships() *AuthNMappingCreateRelationships {
- this := AuthNMappingCreateRelationships{}
- return &this
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject interface{}
}
-// NewAuthNMappingCreateRelationshipsWithDefaults instantiates a new AuthNMappingCreateRelationships object.
-// This constructor will only assign default values to properties that have it defined,
-// but it doesn't guarantee that properties required by API are set.
-func NewAuthNMappingCreateRelationshipsWithDefaults() *AuthNMappingCreateRelationships {
- this := AuthNMappingCreateRelationships{}
- return &this
+// AuthNMappingRelationshipToRoleAsAuthNMappingCreateRelationships is a convenience function that returns AuthNMappingRelationshipToRole wrapped in AuthNMappingCreateRelationships.
+func AuthNMappingRelationshipToRoleAsAuthNMappingCreateRelationships(v *AuthNMappingRelationshipToRole) AuthNMappingCreateRelationships {
+ return AuthNMappingCreateRelationships{AuthNMappingRelationshipToRole: v}
}
-// GetRole returns the Role field value if set, zero value otherwise.
-func (o *AuthNMappingCreateRelationships) GetRole() RelationshipToRole {
- if o == nil || o.Role == nil {
- var ret RelationshipToRole
- return ret
- }
- return *o.Role
+// AuthNMappingRelationshipToTeamAsAuthNMappingCreateRelationships is a convenience function that returns AuthNMappingRelationshipToTeam wrapped in AuthNMappingCreateRelationships.
+func AuthNMappingRelationshipToTeamAsAuthNMappingCreateRelationships(v *AuthNMappingRelationshipToTeam) AuthNMappingCreateRelationships {
+ return AuthNMappingCreateRelationships{AuthNMappingRelationshipToTeam: v}
}
-// GetRoleOk returns a tuple with the Role field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *AuthNMappingCreateRelationships) GetRoleOk() (*RelationshipToRole, bool) {
- if o == nil || o.Role == nil {
- return nil, false
+// UnmarshalJSON turns data into one of the pointers in the struct.
+func (obj *AuthNMappingCreateRelationships) UnmarshalJSON(data []byte) error {
+ var err error
+ match := 0
+ // try to unmarshal data into AuthNMappingRelationshipToRole
+ err = datadog.Unmarshal(data, &obj.AuthNMappingRelationshipToRole)
+ if err == nil {
+ if obj.AuthNMappingRelationshipToRole != nil && obj.AuthNMappingRelationshipToRole.UnparsedObject == nil {
+ jsonAuthNMappingRelationshipToRole, _ := datadog.Marshal(obj.AuthNMappingRelationshipToRole)
+ if string(jsonAuthNMappingRelationshipToRole) == "{}" { // empty struct
+ obj.AuthNMappingRelationshipToRole = nil
+ } else {
+ match++
+ }
+ } else {
+ obj.AuthNMappingRelationshipToRole = nil
+ }
+ } else {
+ obj.AuthNMappingRelationshipToRole = nil
}
- return o.Role, true
-}
-
-// HasRole returns a boolean if a field has been set.
-func (o *AuthNMappingCreateRelationships) HasRole() bool {
- return o != nil && o.Role != nil
-}
-// SetRole gets a reference to the given RelationshipToRole and assigns it to the Role field.
-func (o *AuthNMappingCreateRelationships) SetRole(v RelationshipToRole) {
- o.Role = &v
-}
-
-// MarshalJSON serializes the struct using spec logic.
-func (o AuthNMappingCreateRelationships) MarshalJSON() ([]byte, error) {
- toSerialize := map[string]interface{}{}
- if o.UnparsedObject != nil {
- return datadog.Marshal(o.UnparsedObject)
- }
- if o.Role != nil {
- toSerialize["role"] = o.Role
+ // try to unmarshal data into AuthNMappingRelationshipToTeam
+ err = datadog.Unmarshal(data, &obj.AuthNMappingRelationshipToTeam)
+ if err == nil {
+ if obj.AuthNMappingRelationshipToTeam != nil && obj.AuthNMappingRelationshipToTeam.UnparsedObject == nil {
+ jsonAuthNMappingRelationshipToTeam, _ := datadog.Marshal(obj.AuthNMappingRelationshipToTeam)
+ if string(jsonAuthNMappingRelationshipToTeam) == "{}" { // empty struct
+ obj.AuthNMappingRelationshipToTeam = nil
+ } else {
+ match++
+ }
+ } else {
+ obj.AuthNMappingRelationshipToTeam = nil
+ }
+ } else {
+ obj.AuthNMappingRelationshipToTeam = nil
}
- for key, value := range o.AdditionalProperties {
- toSerialize[key] = value
+ if match != 1 { // more than 1 match
+ // reset to nil
+ obj.AuthNMappingRelationshipToRole = nil
+ obj.AuthNMappingRelationshipToTeam = nil
+ return datadog.Unmarshal(data, &obj.UnparsedObject)
}
- return datadog.Marshal(toSerialize)
+ return nil // exactly one match
}
-// UnmarshalJSON deserializes the given payload.
-func (o *AuthNMappingCreateRelationships) UnmarshalJSON(bytes []byte) (err error) {
- all := struct {
- Role *RelationshipToRole `json:"role,omitempty"`
- }{}
- if err = datadog.Unmarshal(bytes, &all); err != nil {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
+// MarshalJSON turns data from the first non-nil pointers in the struct to JSON.
+func (obj AuthNMappingCreateRelationships) MarshalJSON() ([]byte, error) {
+ if obj.AuthNMappingRelationshipToRole != nil {
+ return datadog.Marshal(&obj.AuthNMappingRelationshipToRole)
}
- additionalProperties := make(map[string]interface{})
- if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"role"})
- } else {
- return err
+
+ if obj.AuthNMappingRelationshipToTeam != nil {
+ return datadog.Marshal(&obj.AuthNMappingRelationshipToTeam)
}
- hasInvalidField := false
- if all.Role != nil && all.Role.UnparsedObject != nil && o.UnparsedObject == nil {
- hasInvalidField = true
+ if obj.UnparsedObject != nil {
+ return datadog.Marshal(obj.UnparsedObject)
}
- o.Role = all.Role
+ return nil, nil // no data in oneOf schemas
+}
- if len(additionalProperties) > 0 {
- o.AdditionalProperties = additionalProperties
+// GetActualInstance returns the actual instance.
+func (obj *AuthNMappingCreateRelationships) GetActualInstance() interface{} {
+ if obj.AuthNMappingRelationshipToRole != nil {
+ return obj.AuthNMappingRelationshipToRole
}
- if hasInvalidField {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ if obj.AuthNMappingRelationshipToTeam != nil {
+ return obj.AuthNMappingRelationshipToTeam
}
+ // all schemas are nil
return nil
}
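Since AuthNMappingCreateRelationships is now a oneOf wrapper, callers choose a branch through the convenience constructors added above. A minimal sketch, assuming the RelationshipToRole and RelationshipToTeam values are populated elsewhere with the target IDs:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	// Role-backed relationship: the previous single shape, now one oneOf branch.
	var role datadogV2.RelationshipToRole // assume populated with the target role ID
	roleRel := datadogV2.AuthNMappingRelationshipToRoleAsAuthNMappingCreateRelationships(
		datadogV2.NewAuthNMappingRelationshipToRole(role),
	)

	// Team-backed relationship: the new oneOf branch introduced by this change.
	var team datadogV2.RelationshipToTeam // assume populated with the target team ID
	teamRel := datadogV2.AuthNMappingRelationshipToTeamAsAuthNMappingCreateRelationships(
		datadogV2.NewAuthNMappingRelationshipToTeam(team),
	)

	// Exactly one branch is set on each value; GetActualInstance reports which one.
	fmt.Printf("%T\n", roleRel.GetActualInstance()) // *datadogV2.AuthNMappingRelationshipToRole
	fmt.Printf("%T\n", teamRel.GetActualInstance()) // *datadogV2.AuthNMappingRelationshipToTeam
}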
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_included.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_included.go
index 73163d9996..7b43d49de8 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_included.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_included.go
@@ -12,6 +12,7 @@ import (
type AuthNMappingIncluded struct {
SAMLAssertionAttribute *SAMLAssertionAttribute
Role *Role
+ AuthNMappingTeam *AuthNMappingTeam
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject interface{}
@@ -27,6 +28,11 @@ func RoleAsAuthNMappingIncluded(v *Role) AuthNMappingIncluded {
return AuthNMappingIncluded{Role: v}
}
+// AuthNMappingTeamAsAuthNMappingIncluded is a convenience function that returns AuthNMappingTeam wrapped in AuthNMappingIncluded.
+func AuthNMappingTeamAsAuthNMappingIncluded(v *AuthNMappingTeam) AuthNMappingIncluded {
+ return AuthNMappingIncluded{AuthNMappingTeam: v}
+}
+
// UnmarshalJSON turns data into one of the pointers in the struct.
func (obj *AuthNMappingIncluded) UnmarshalJSON(data []byte) error {
var err error
@@ -65,10 +71,28 @@ func (obj *AuthNMappingIncluded) UnmarshalJSON(data []byte) error {
obj.Role = nil
}
+ // try to unmarshal data into AuthNMappingTeam
+ err = datadog.Unmarshal(data, &obj.AuthNMappingTeam)
+ if err == nil {
+ if obj.AuthNMappingTeam != nil && obj.AuthNMappingTeam.UnparsedObject == nil {
+ jsonAuthNMappingTeam, _ := datadog.Marshal(obj.AuthNMappingTeam)
+ if string(jsonAuthNMappingTeam) == "{}" { // empty struct
+ obj.AuthNMappingTeam = nil
+ } else {
+ match++
+ }
+ } else {
+ obj.AuthNMappingTeam = nil
+ }
+ } else {
+ obj.AuthNMappingTeam = nil
+ }
+
if match != 1 { // more than 1 match
// reset to nil
obj.SAMLAssertionAttribute = nil
obj.Role = nil
+ obj.AuthNMappingTeam = nil
return datadog.Unmarshal(data, &obj.UnparsedObject)
}
return nil // exactly one match
@@ -84,6 +108,10 @@ func (obj AuthNMappingIncluded) MarshalJSON() ([]byte, error) {
return datadog.Marshal(&obj.Role)
}
+ if obj.AuthNMappingTeam != nil {
+ return datadog.Marshal(&obj.AuthNMappingTeam)
+ }
+
if obj.UnparsedObject != nil {
return datadog.Marshal(obj.UnparsedObject)
}
@@ -100,6 +128,10 @@ func (obj *AuthNMappingIncluded) GetActualInstance() interface{} {
return obj.Role
}
+ if obj.AuthNMappingTeam != nil {
+ return obj.AuthNMappingTeam
+ }
+
// all schemas are nil
return nil
}
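With AuthNMappingTeam added to the AuthNMappingIncluded union, consumers typically switch on GetActualInstance to see which included resource they received. A hedged sketch (the surrounding list call that produces the included values is not part of this diff):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

// describeIncluded shows how the extended union can be consumed when reading
// the included resources of an AuthN Mapping response.
func describeIncluded(inc datadogV2.AuthNMappingIncluded) string {
	switch v := inc.GetActualInstance().(type) {
	case *datadogV2.SAMLAssertionAttribute:
		return "saml_assertion_attribute"
	case *datadogV2.Role:
		return "role"
	case *datadogV2.AuthNMappingTeam:
		// New branch added by this change.
		return fmt.Sprintf("team %s", v.GetId())
	default:
		return "unparsed or empty"
	}
}

func main() {
	var inc datadogV2.AuthNMappingIncluded // assume decoded from an API response
	fmt.Println(describeIncluded(inc))
}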
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationship_to_role.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationship_to_role.go
new file mode 100644
index 0000000000..220a1117ff
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationship_to_role.go
@@ -0,0 +1,110 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// AuthNMappingRelationshipToRole Relationship of AuthN Mapping to a Role.
+type AuthNMappingRelationshipToRole struct {
+ // Relationship to role.
+ Role RelationshipToRole `json:"role"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewAuthNMappingRelationshipToRole instantiates a new AuthNMappingRelationshipToRole object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewAuthNMappingRelationshipToRole(role RelationshipToRole) *AuthNMappingRelationshipToRole {
+ this := AuthNMappingRelationshipToRole{}
+ this.Role = role
+ return &this
+}
+
+// NewAuthNMappingRelationshipToRoleWithDefaults instantiates a new AuthNMappingRelationshipToRole object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewAuthNMappingRelationshipToRoleWithDefaults() *AuthNMappingRelationshipToRole {
+ this := AuthNMappingRelationshipToRole{}
+ return &this
+}
+
+// GetRole returns the Role field value.
+func (o *AuthNMappingRelationshipToRole) GetRole() RelationshipToRole {
+ if o == nil {
+ var ret RelationshipToRole
+ return ret
+ }
+ return o.Role
+}
+
+// GetRoleOk returns a tuple with the Role field value
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingRelationshipToRole) GetRoleOk() (*RelationshipToRole, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Role, true
+}
+
+// SetRole sets field value.
+func (o *AuthNMappingRelationshipToRole) SetRole(v RelationshipToRole) {
+ o.Role = v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o AuthNMappingRelationshipToRole) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ toSerialize["role"] = o.Role
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *AuthNMappingRelationshipToRole) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Role *RelationshipToRole `json:"role"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ if all.Role == nil {
+ return fmt.Errorf("required field role missing")
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"role"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Role.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Role = *all.Role
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationship_to_team.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationship_to_team.go
new file mode 100644
index 0000000000..715722d50d
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationship_to_team.go
@@ -0,0 +1,110 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// AuthNMappingRelationshipToTeam Relationship of AuthN Mapping to a Team.
+type AuthNMappingRelationshipToTeam struct {
+ // Relationship to team.
+ Team RelationshipToTeam `json:"team"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewAuthNMappingRelationshipToTeam instantiates a new AuthNMappingRelationshipToTeam object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewAuthNMappingRelationshipToTeam(team RelationshipToTeam) *AuthNMappingRelationshipToTeam {
+ this := AuthNMappingRelationshipToTeam{}
+ this.Team = team
+ return &this
+}
+
+// NewAuthNMappingRelationshipToTeamWithDefaults instantiates a new AuthNMappingRelationshipToTeam object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewAuthNMappingRelationshipToTeamWithDefaults() *AuthNMappingRelationshipToTeam {
+ this := AuthNMappingRelationshipToTeam{}
+ return &this
+}
+
+// GetTeam returns the Team field value.
+func (o *AuthNMappingRelationshipToTeam) GetTeam() RelationshipToTeam {
+ if o == nil {
+ var ret RelationshipToTeam
+ return ret
+ }
+ return o.Team
+}
+
+// GetTeamOk returns a tuple with the Team field value
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingRelationshipToTeam) GetTeamOk() (*RelationshipToTeam, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Team, true
+}
+
+// SetTeam sets field value.
+func (o *AuthNMappingRelationshipToTeam) SetTeam(v RelationshipToTeam) {
+ o.Team = v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o AuthNMappingRelationshipToTeam) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ toSerialize["team"] = o.Team
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *AuthNMappingRelationshipToTeam) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Team *RelationshipToTeam `json:"team"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ if all.Team == nil {
+ return fmt.Errorf("required field team missing")
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"team"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Team.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Team = *all.Team
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationships.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationships.go
index a67a97e4ac..bef6db8e15 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationships.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_relationships.go
@@ -14,6 +14,8 @@ type AuthNMappingRelationships struct {
Role *RelationshipToRole `json:"role,omitempty"`
// AuthN Mapping relationship to SAML Assertion Attribute.
SamlAssertionAttribute *RelationshipToSAMLAssertionAttribute `json:"saml_assertion_attribute,omitempty"`
+ // Relationship to team.
+ Team *RelationshipToTeam `json:"team,omitempty"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject map[string]interface{} `json:"-"`
AdditionalProperties map[string]interface{}
@@ -92,6 +94,34 @@ func (o *AuthNMappingRelationships) SetSamlAssertionAttribute(v RelationshipToSA
o.SamlAssertionAttribute = &v
}
+// GetTeam returns the Team field value if set, zero value otherwise.
+func (o *AuthNMappingRelationships) GetTeam() RelationshipToTeam {
+ if o == nil || o.Team == nil {
+ var ret RelationshipToTeam
+ return ret
+ }
+ return *o.Team
+}
+
+// GetTeamOk returns a tuple with the Team field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingRelationships) GetTeamOk() (*RelationshipToTeam, bool) {
+ if o == nil || o.Team == nil {
+ return nil, false
+ }
+ return o.Team, true
+}
+
+// HasTeam returns a boolean if a field has been set.
+func (o *AuthNMappingRelationships) HasTeam() bool {
+ return o != nil && o.Team != nil
+}
+
+// SetTeam gets a reference to the given RelationshipToTeam and assigns it to the Team field.
+func (o *AuthNMappingRelationships) SetTeam(v RelationshipToTeam) {
+ o.Team = &v
+}
+
// MarshalJSON serializes the struct using spec logic.
func (o AuthNMappingRelationships) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
@@ -104,6 +134,9 @@ func (o AuthNMappingRelationships) MarshalJSON() ([]byte, error) {
if o.SamlAssertionAttribute != nil {
toSerialize["saml_assertion_attribute"] = o.SamlAssertionAttribute
}
+ if o.Team != nil {
+ toSerialize["team"] = o.Team
+ }
for key, value := range o.AdditionalProperties {
toSerialize[key] = value
@@ -116,13 +149,14 @@ func (o *AuthNMappingRelationships) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
Role *RelationshipToRole `json:"role,omitempty"`
SamlAssertionAttribute *RelationshipToSAMLAssertionAttribute `json:"saml_assertion_attribute,omitempty"`
+ Team *RelationshipToTeam `json:"team,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"role", "saml_assertion_attribute"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"role", "saml_assertion_attribute", "team"})
} else {
return err
}
@@ -136,6 +170,10 @@ func (o *AuthNMappingRelationships) UnmarshalJSON(bytes []byte) (err error) {
hasInvalidField = true
}
o.SamlAssertionAttribute = all.SamlAssertionAttribute
+ if all.Team != nil && all.Team.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Team = all.Team
if len(additionalProperties) > 0 {
o.AdditionalProperties = additionalProperties
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_team.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_team.go
new file mode 100644
index 0000000000..1b74ee6dcc
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_team.go
@@ -0,0 +1,189 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// AuthNMappingTeam Team.
+type AuthNMappingTeam struct {
+ // Team attributes.
+ Attributes *AuthNMappingTeamAttributes `json:"attributes,omitempty"`
+ // The ID of the Team.
+ Id *string `json:"id,omitempty"`
+ // Team type
+ Type *TeamType `json:"type,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewAuthNMappingTeam instantiates a new AuthNMappingTeam object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewAuthNMappingTeam() *AuthNMappingTeam {
+ this := AuthNMappingTeam{}
+ var typeVar TeamType = TEAMTYPE_TEAM
+ this.Type = &typeVar
+ return &this
+}
+
+// NewAuthNMappingTeamWithDefaults instantiates a new AuthNMappingTeam object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewAuthNMappingTeamWithDefaults() *AuthNMappingTeam {
+ this := AuthNMappingTeam{}
+ var typeVar TeamType = TEAMTYPE_TEAM
+ this.Type = &typeVar
+ return &this
+}
+
+// GetAttributes returns the Attributes field value if set, zero value otherwise.
+func (o *AuthNMappingTeam) GetAttributes() AuthNMappingTeamAttributes {
+ if o == nil || o.Attributes == nil {
+ var ret AuthNMappingTeamAttributes
+ return ret
+ }
+ return *o.Attributes
+}
+
+// GetAttributesOk returns a tuple with the Attributes field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingTeam) GetAttributesOk() (*AuthNMappingTeamAttributes, bool) {
+ if o == nil || o.Attributes == nil {
+ return nil, false
+ }
+ return o.Attributes, true
+}
+
+// HasAttributes returns a boolean if a field has been set.
+func (o *AuthNMappingTeam) HasAttributes() bool {
+ return o != nil && o.Attributes != nil
+}
+
+// SetAttributes gets a reference to the given AuthNMappingTeamAttributes and assigns it to the Attributes field.
+func (o *AuthNMappingTeam) SetAttributes(v AuthNMappingTeamAttributes) {
+ o.Attributes = &v
+}
+
+// GetId returns the Id field value if set, zero value otherwise.
+func (o *AuthNMappingTeam) GetId() string {
+ if o == nil || o.Id == nil {
+ var ret string
+ return ret
+ }
+ return *o.Id
+}
+
+// GetIdOk returns a tuple with the Id field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingTeam) GetIdOk() (*string, bool) {
+ if o == nil || o.Id == nil {
+ return nil, false
+ }
+ return o.Id, true
+}
+
+// HasId returns a boolean if a field has been set.
+func (o *AuthNMappingTeam) HasId() bool {
+ return o != nil && o.Id != nil
+}
+
+// SetId gets a reference to the given string and assigns it to the Id field.
+func (o *AuthNMappingTeam) SetId(v string) {
+ o.Id = &v
+}
+
+// GetType returns the Type field value if set, zero value otherwise.
+func (o *AuthNMappingTeam) GetType() TeamType {
+ if o == nil || o.Type == nil {
+ var ret TeamType
+ return ret
+ }
+ return *o.Type
+}
+
+// GetTypeOk returns a tuple with the Type field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingTeam) GetTypeOk() (*TeamType, bool) {
+ if o == nil || o.Type == nil {
+ return nil, false
+ }
+ return o.Type, true
+}
+
+// HasType returns a boolean if a field has been set.
+func (o *AuthNMappingTeam) HasType() bool {
+ return o != nil && o.Type != nil
+}
+
+// SetType gets a reference to the given TeamType and assigns it to the Type field.
+func (o *AuthNMappingTeam) SetType(v TeamType) {
+ o.Type = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o AuthNMappingTeam) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Attributes != nil {
+ toSerialize["attributes"] = o.Attributes
+ }
+ if o.Id != nil {
+ toSerialize["id"] = o.Id
+ }
+ if o.Type != nil {
+ toSerialize["type"] = o.Type
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *AuthNMappingTeam) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Attributes *AuthNMappingTeamAttributes `json:"attributes,omitempty"`
+ Id *string `json:"id,omitempty"`
+ Type *TeamType `json:"type,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"attributes", "id", "type"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Attributes != nil && all.Attributes.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Attributes = all.Attributes
+ o.Id = all.Id
+ if all.Type != nil && !all.Type.IsValid() {
+ hasInvalidField = true
+ } else {
+ o.Type = all.Type
+ }
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_team_attributes.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_team_attributes.go
new file mode 100644
index 0000000000..e8fef2b78e
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_team_attributes.go
@@ -0,0 +1,345 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// AuthNMappingTeamAttributes Team attributes.
+type AuthNMappingTeamAttributes struct {
+ // Unicode representation of the avatar for the team, limited to a single grapheme
+ Avatar datadog.NullableString `json:"avatar,omitempty"`
+ // Banner selection for the team
+ Banner datadog.NullableInt64 `json:"banner,omitempty"`
+ // The team's identifier
+ Handle *string `json:"handle,omitempty"`
+ // The number of links belonging to the team
+ LinkCount *int32 `json:"link_count,omitempty"`
+ // The name of the team
+ Name *string `json:"name,omitempty"`
+ // A brief summary of the team, derived from the `description`
+ Summary datadog.NullableString `json:"summary,omitempty"`
+ // The number of users belonging to the team
+ UserCount *int32 `json:"user_count,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewAuthNMappingTeamAttributes instantiates a new AuthNMappingTeamAttributes object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewAuthNMappingTeamAttributes() *AuthNMappingTeamAttributes {
+ this := AuthNMappingTeamAttributes{}
+ return &this
+}
+
+// NewAuthNMappingTeamAttributesWithDefaults instantiates a new AuthNMappingTeamAttributes object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewAuthNMappingTeamAttributesWithDefaults() *AuthNMappingTeamAttributes {
+ this := AuthNMappingTeamAttributes{}
+ return &this
+}
+
+// GetAvatar returns the Avatar field value if set, zero value otherwise (both if not set or set to explicit null).
+func (o *AuthNMappingTeamAttributes) GetAvatar() string {
+ if o == nil || o.Avatar.Get() == nil {
+ var ret string
+ return ret
+ }
+ return *o.Avatar.Get()
+}
+
+// GetAvatarOk returns a tuple with the Avatar field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+// NOTE: If the value is an explicit nil, `nil, true` will be returned.
+func (o *AuthNMappingTeamAttributes) GetAvatarOk() (*string, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return o.Avatar.Get(), o.Avatar.IsSet()
+}
+
+// HasAvatar returns a boolean if a field has been set.
+func (o *AuthNMappingTeamAttributes) HasAvatar() bool {
+ return o != nil && o.Avatar.IsSet()
+}
+
+// SetAvatar gets a reference to the given datadog.NullableString and assigns it to the Avatar field.
+func (o *AuthNMappingTeamAttributes) SetAvatar(v string) {
+ o.Avatar.Set(&v)
+}
+
+// SetAvatarNil sets the value for Avatar to be an explicit nil.
+func (o *AuthNMappingTeamAttributes) SetAvatarNil() {
+ o.Avatar.Set(nil)
+}
+
+// UnsetAvatar ensures that no value is present for Avatar, not even an explicit nil.
+func (o *AuthNMappingTeamAttributes) UnsetAvatar() {
+ o.Avatar.Unset()
+}
+
+// GetBanner returns the Banner field value if set, zero value otherwise (both if not set or set to explicit null).
+func (o *AuthNMappingTeamAttributes) GetBanner() int64 {
+ if o == nil || o.Banner.Get() == nil {
+ var ret int64
+ return ret
+ }
+ return *o.Banner.Get()
+}
+
+// GetBannerOk returns a tuple with the Banner field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+// NOTE: If the value is an explicit nil, `nil, true` will be returned.
+func (o *AuthNMappingTeamAttributes) GetBannerOk() (*int64, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return o.Banner.Get(), o.Banner.IsSet()
+}
+
+// HasBanner returns a boolean if a field has been set.
+func (o *AuthNMappingTeamAttributes) HasBanner() bool {
+ return o != nil && o.Banner.IsSet()
+}
+
+// SetBanner gets a reference to the given datadog.NullableInt64 and assigns it to the Banner field.
+func (o *AuthNMappingTeamAttributes) SetBanner(v int64) {
+ o.Banner.Set(&v)
+}
+
+// SetBannerNil sets the value for Banner to be an explicit nil.
+func (o *AuthNMappingTeamAttributes) SetBannerNil() {
+ o.Banner.Set(nil)
+}
+
+// UnsetBanner ensures that no value is present for Banner, not even an explicit nil.
+func (o *AuthNMappingTeamAttributes) UnsetBanner() {
+ o.Banner.Unset()
+}
+
+// GetHandle returns the Handle field value if set, zero value otherwise.
+func (o *AuthNMappingTeamAttributes) GetHandle() string {
+ if o == nil || o.Handle == nil {
+ var ret string
+ return ret
+ }
+ return *o.Handle
+}
+
+// GetHandleOk returns a tuple with the Handle field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingTeamAttributes) GetHandleOk() (*string, bool) {
+ if o == nil || o.Handle == nil {
+ return nil, false
+ }
+ return o.Handle, true
+}
+
+// HasHandle returns a boolean if a field has been set.
+func (o *AuthNMappingTeamAttributes) HasHandle() bool {
+ return o != nil && o.Handle != nil
+}
+
+// SetHandle gets a reference to the given string and assigns it to the Handle field.
+func (o *AuthNMappingTeamAttributes) SetHandle(v string) {
+ o.Handle = &v
+}
+
+// GetLinkCount returns the LinkCount field value if set, zero value otherwise.
+func (o *AuthNMappingTeamAttributes) GetLinkCount() int32 {
+ if o == nil || o.LinkCount == nil {
+ var ret int32
+ return ret
+ }
+ return *o.LinkCount
+}
+
+// GetLinkCountOk returns a tuple with the LinkCount field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingTeamAttributes) GetLinkCountOk() (*int32, bool) {
+ if o == nil || o.LinkCount == nil {
+ return nil, false
+ }
+ return o.LinkCount, true
+}
+
+// HasLinkCount returns a boolean if a field has been set.
+func (o *AuthNMappingTeamAttributes) HasLinkCount() bool {
+ return o != nil && o.LinkCount != nil
+}
+
+// SetLinkCount gets a reference to the given int32 and assigns it to the LinkCount field.
+func (o *AuthNMappingTeamAttributes) SetLinkCount(v int32) {
+ o.LinkCount = &v
+}
+
+// GetName returns the Name field value if set, zero value otherwise.
+func (o *AuthNMappingTeamAttributes) GetName() string {
+ if o == nil || o.Name == nil {
+ var ret string
+ return ret
+ }
+ return *o.Name
+}
+
+// GetNameOk returns a tuple with the Name field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingTeamAttributes) GetNameOk() (*string, bool) {
+ if o == nil || o.Name == nil {
+ return nil, false
+ }
+ return o.Name, true
+}
+
+// HasName returns a boolean if a field has been set.
+func (o *AuthNMappingTeamAttributes) HasName() bool {
+ return o != nil && o.Name != nil
+}
+
+// SetName gets a reference to the given string and assigns it to the Name field.
+func (o *AuthNMappingTeamAttributes) SetName(v string) {
+ o.Name = &v
+}
+
+// GetSummary returns the Summary field value if set, zero value otherwise (both if not set or set to explicit null).
+func (o *AuthNMappingTeamAttributes) GetSummary() string {
+ if o == nil || o.Summary.Get() == nil {
+ var ret string
+ return ret
+ }
+ return *o.Summary.Get()
+}
+
+// GetSummaryOk returns a tuple with the Summary field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+// NOTE: If the value is an explicit nil, `nil, true` will be returned.
+func (o *AuthNMappingTeamAttributes) GetSummaryOk() (*string, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return o.Summary.Get(), o.Summary.IsSet()
+}
+
+// HasSummary returns a boolean if a field has been set.
+func (o *AuthNMappingTeamAttributes) HasSummary() bool {
+ return o != nil && o.Summary.IsSet()
+}
+
+// SetSummary gets a reference to the given datadog.NullableString and assigns it to the Summary field.
+func (o *AuthNMappingTeamAttributes) SetSummary(v string) {
+ o.Summary.Set(&v)
+}
+
+// SetSummaryNil sets the value for Summary to be an explicit nil.
+func (o *AuthNMappingTeamAttributes) SetSummaryNil() {
+ o.Summary.Set(nil)
+}
+
+// UnsetSummary ensures that no value is present for Summary, not even an explicit nil.
+func (o *AuthNMappingTeamAttributes) UnsetSummary() {
+ o.Summary.Unset()
+}
+
+// GetUserCount returns the UserCount field value if set, zero value otherwise.
+func (o *AuthNMappingTeamAttributes) GetUserCount() int32 {
+ if o == nil || o.UserCount == nil {
+ var ret int32
+ return ret
+ }
+ return *o.UserCount
+}
+
+// GetUserCountOk returns a tuple with the UserCount field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *AuthNMappingTeamAttributes) GetUserCountOk() (*int32, bool) {
+ if o == nil || o.UserCount == nil {
+ return nil, false
+ }
+ return o.UserCount, true
+}
+
+// HasUserCount returns a boolean if a field has been set.
+func (o *AuthNMappingTeamAttributes) HasUserCount() bool {
+ return o != nil && o.UserCount != nil
+}
+
+// SetUserCount gets a reference to the given int32 and assigns it to the UserCount field.
+func (o *AuthNMappingTeamAttributes) SetUserCount(v int32) {
+ o.UserCount = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o AuthNMappingTeamAttributes) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Avatar.IsSet() {
+ toSerialize["avatar"] = o.Avatar.Get()
+ }
+ if o.Banner.IsSet() {
+ toSerialize["banner"] = o.Banner.Get()
+ }
+ if o.Handle != nil {
+ toSerialize["handle"] = o.Handle
+ }
+ if o.LinkCount != nil {
+ toSerialize["link_count"] = o.LinkCount
+ }
+ if o.Name != nil {
+ toSerialize["name"] = o.Name
+ }
+ if o.Summary.IsSet() {
+ toSerialize["summary"] = o.Summary.Get()
+ }
+ if o.UserCount != nil {
+ toSerialize["user_count"] = o.UserCount
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *AuthNMappingTeamAttributes) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Avatar datadog.NullableString `json:"avatar,omitempty"`
+ Banner datadog.NullableInt64 `json:"banner,omitempty"`
+ Handle *string `json:"handle,omitempty"`
+ LinkCount *int32 `json:"link_count,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Summary datadog.NullableString `json:"summary,omitempty"`
+ UserCount *int32 `json:"user_count,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"avatar", "banner", "handle", "link_count", "name", "summary", "user_count"})
+ } else {
+ return err
+ }
+ o.Avatar = all.Avatar
+ o.Banner = all.Banner
+ o.Handle = all.Handle
+ o.LinkCount = all.LinkCount
+ o.Name = all.Name
+ o.Summary = all.Summary
+ o.UserCount = all.UserCount
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ return nil
+}
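For reviewers, a minimal usage sketch (not part of the PR) of the nullable fields on the new AuthNMappingTeamAttributes model, assuming the canonical import path github.com/DataDog/datadog-api-client-go/v2 (the vendored paths above are the same package). A nullable field can be left unset, set to a value, or set to an explicit null, and the MarshalJSON above emits each state differently:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	attrs := datadogV2.AuthNMappingTeamAttributes{}
	attrs.SetName("backend")
	attrs.SetSummaryNil() // explicit null: serialized as "summary": null
	attrs.SetBanner(7)    // set value: would serialize as "banner": 7
	attrs.UnsetBanner()   // unset again: "banner" is omitted entirely

	out, err := json.Marshal(attrs) // goes through the custom MarshalJSON above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // expected roughly: {"name":"backend","summary":null}
}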
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_update_data.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_update_data.go
index 9d87350618..045820a1a8 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_update_data.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_update_data.go
@@ -16,7 +16,7 @@ type AuthNMappingUpdateData struct {
Attributes *AuthNMappingUpdateAttributes `json:"attributes,omitempty"`
// ID of the AuthN Mapping.
Id string `json:"id"`
- // Relationship of AuthN Mapping update object to Role.
+ // Relationship of AuthN Mapping update object to a Role or Team.
Relationships *AuthNMappingUpdateRelationships `json:"relationships,omitempty"`
// AuthN Mappings resource type.
Type AuthNMappingsType `json:"type"`
@@ -199,9 +199,6 @@ func (o *AuthNMappingUpdateData) UnmarshalJSON(bytes []byte) (err error) {
}
o.Attributes = all.Attributes
o.Id = *all.Id
- if all.Relationships != nil && all.Relationships.UnparsedObject != nil && o.UnparsedObject == nil {
- hasInvalidField = true
- }
o.Relationships = all.Relationships
if !all.Type.IsValid() {
hasInvalidField = true
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_update_relationships.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_update_relationships.go
index 81887cf9a4..92ba6041d6 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_update_relationships.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_auth_n_mapping_update_relationships.go
@@ -8,104 +8,98 @@ import (
"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
)
-// AuthNMappingUpdateRelationships Relationship of AuthN Mapping update object to Role.
+// AuthNMappingUpdateRelationships - Relationship of AuthN Mapping update object to a Role or Team.
type AuthNMappingUpdateRelationships struct {
- // Relationship to role.
- Role *RelationshipToRole `json:"role,omitempty"`
- // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
- UnparsedObject map[string]interface{} `json:"-"`
- AdditionalProperties map[string]interface{}
-}
+ AuthNMappingRelationshipToRole *AuthNMappingRelationshipToRole
+ AuthNMappingRelationshipToTeam *AuthNMappingRelationshipToTeam
-// NewAuthNMappingUpdateRelationships instantiates a new AuthNMappingUpdateRelationships object.
-// This constructor will assign default values to properties that have it defined,
-// and makes sure properties required by API are set, but the set of arguments
-// will change when the set of required properties is changed.
-func NewAuthNMappingUpdateRelationships() *AuthNMappingUpdateRelationships {
- this := AuthNMappingUpdateRelationships{}
- return &this
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject interface{}
}
-// NewAuthNMappingUpdateRelationshipsWithDefaults instantiates a new AuthNMappingUpdateRelationships object.
-// This constructor will only assign default values to properties that have it defined,
-// but it doesn't guarantee that properties required by API are set.
-func NewAuthNMappingUpdateRelationshipsWithDefaults() *AuthNMappingUpdateRelationships {
- this := AuthNMappingUpdateRelationships{}
- return &this
+// AuthNMappingRelationshipToRoleAsAuthNMappingUpdateRelationships is a convenience function that returns AuthNMappingRelationshipToRole wrapped in AuthNMappingUpdateRelationships.
+func AuthNMappingRelationshipToRoleAsAuthNMappingUpdateRelationships(v *AuthNMappingRelationshipToRole) AuthNMappingUpdateRelationships {
+ return AuthNMappingUpdateRelationships{AuthNMappingRelationshipToRole: v}
}
-// GetRole returns the Role field value if set, zero value otherwise.
-func (o *AuthNMappingUpdateRelationships) GetRole() RelationshipToRole {
- if o == nil || o.Role == nil {
- var ret RelationshipToRole
- return ret
- }
- return *o.Role
+// AuthNMappingRelationshipToTeamAsAuthNMappingUpdateRelationships is a convenience function that returns AuthNMappingRelationshipToTeam wrapped in AuthNMappingUpdateRelationships.
+func AuthNMappingRelationshipToTeamAsAuthNMappingUpdateRelationships(v *AuthNMappingRelationshipToTeam) AuthNMappingUpdateRelationships {
+ return AuthNMappingUpdateRelationships{AuthNMappingRelationshipToTeam: v}
}
-// GetRoleOk returns a tuple with the Role field value if set, nil otherwise
-// and a boolean to check if the value has been set.
-func (o *AuthNMappingUpdateRelationships) GetRoleOk() (*RelationshipToRole, bool) {
- if o == nil || o.Role == nil {
- return nil, false
+// UnmarshalJSON turns data into one of the pointers in the struct.
+func (obj *AuthNMappingUpdateRelationships) UnmarshalJSON(data []byte) error {
+ var err error
+ match := 0
+ // try to unmarshal data into AuthNMappingRelationshipToRole
+ err = datadog.Unmarshal(data, &obj.AuthNMappingRelationshipToRole)
+ if err == nil {
+ if obj.AuthNMappingRelationshipToRole != nil && obj.AuthNMappingRelationshipToRole.UnparsedObject == nil {
+ jsonAuthNMappingRelationshipToRole, _ := datadog.Marshal(obj.AuthNMappingRelationshipToRole)
+ if string(jsonAuthNMappingRelationshipToRole) == "{}" { // empty struct
+ obj.AuthNMappingRelationshipToRole = nil
+ } else {
+ match++
+ }
+ } else {
+ obj.AuthNMappingRelationshipToRole = nil
+ }
+ } else {
+ obj.AuthNMappingRelationshipToRole = nil
}
- return o.Role, true
-}
-
-// HasRole returns a boolean if a field has been set.
-func (o *AuthNMappingUpdateRelationships) HasRole() bool {
- return o != nil && o.Role != nil
-}
-// SetRole gets a reference to the given RelationshipToRole and assigns it to the Role field.
-func (o *AuthNMappingUpdateRelationships) SetRole(v RelationshipToRole) {
- o.Role = &v
-}
-
-// MarshalJSON serializes the struct using spec logic.
-func (o AuthNMappingUpdateRelationships) MarshalJSON() ([]byte, error) {
- toSerialize := map[string]interface{}{}
- if o.UnparsedObject != nil {
- return datadog.Marshal(o.UnparsedObject)
- }
- if o.Role != nil {
- toSerialize["role"] = o.Role
+ // try to unmarshal data into AuthNMappingRelationshipToTeam
+ err = datadog.Unmarshal(data, &obj.AuthNMappingRelationshipToTeam)
+ if err == nil {
+ if obj.AuthNMappingRelationshipToTeam != nil && obj.AuthNMappingRelationshipToTeam.UnparsedObject == nil {
+ jsonAuthNMappingRelationshipToTeam, _ := datadog.Marshal(obj.AuthNMappingRelationshipToTeam)
+ if string(jsonAuthNMappingRelationshipToTeam) == "{}" { // empty struct
+ obj.AuthNMappingRelationshipToTeam = nil
+ } else {
+ match++
+ }
+ } else {
+ obj.AuthNMappingRelationshipToTeam = nil
+ }
+ } else {
+ obj.AuthNMappingRelationshipToTeam = nil
}
- for key, value := range o.AdditionalProperties {
- toSerialize[key] = value
+ if match != 1 { // not exactly 1 match
+ // reset to nil
+ obj.AuthNMappingRelationshipToRole = nil
+ obj.AuthNMappingRelationshipToTeam = nil
+ return datadog.Unmarshal(data, &obj.UnparsedObject)
}
- return datadog.Marshal(toSerialize)
+ return nil // exactly one match
}
-// UnmarshalJSON deserializes the given payload.
-func (o *AuthNMappingUpdateRelationships) UnmarshalJSON(bytes []byte) (err error) {
- all := struct {
- Role *RelationshipToRole `json:"role,omitempty"`
- }{}
- if err = datadog.Unmarshal(bytes, &all); err != nil {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
+// MarshalJSON turns data from the first non-nil pointers in the struct to JSON.
+func (obj AuthNMappingUpdateRelationships) MarshalJSON() ([]byte, error) {
+ if obj.AuthNMappingRelationshipToRole != nil {
+ return datadog.Marshal(&obj.AuthNMappingRelationshipToRole)
}
- additionalProperties := make(map[string]interface{})
- if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"role"})
- } else {
- return err
+
+ if obj.AuthNMappingRelationshipToTeam != nil {
+ return datadog.Marshal(&obj.AuthNMappingRelationshipToTeam)
}
- hasInvalidField := false
- if all.Role != nil && all.Role.UnparsedObject != nil && o.UnparsedObject == nil {
- hasInvalidField = true
+ if obj.UnparsedObject != nil {
+ return datadog.Marshal(obj.UnparsedObject)
}
- o.Role = all.Role
+ return nil, nil // no data in oneOf schemas
+}
- if len(additionalProperties) > 0 {
- o.AdditionalProperties = additionalProperties
+// GetActualInstance returns the actual instance.
+func (obj *AuthNMappingUpdateRelationships) GetActualInstance() interface{} {
+ if obj.AuthNMappingRelationshipToRole != nil {
+ return obj.AuthNMappingRelationshipToRole
}
- if hasInvalidField {
- return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ if obj.AuthNMappingRelationshipToTeam != nil {
+ return obj.AuthNMappingRelationshipToTeam
}
+ // all schemas are nil
return nil
}
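The struct above is now a oneOf wrapper: construction goes through the convenience functions, and UnmarshalJSON accepts a payload only when exactly one variant matches, otherwise the raw payload is kept in UnparsedObject. A hedged sketch of both directions (the team payload shape and IDs are hypothetical, and the canonical github.com module path is assumed):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	// Building an update payload from a role relationship; it serializes as the wrapped variant.
	role := datadogV2.AuthNMappingRelationshipToRole{} // fields omitted; the model is not shown in this diff
	rel := datadogV2.AuthNMappingRelationshipToRoleAsAuthNMappingUpdateRelationships(&role)
	_ = rel

	// Deserializing picks whichever variant matches.
	var parsed datadogV2.AuthNMappingUpdateRelationships
	payload := []byte(`{"team":{"data":{"id":"abc-123","type":"team"}}}`) // hypothetical shape
	if err := json.Unmarshal(payload, &parsed); err != nil {
		panic(err)
	}
	switch parsed.GetActualInstance().(type) {
	case *datadogV2.AuthNMappingRelationshipToRole:
		fmt.Println("matched the role variant")
	case *datadogV2.AuthNMappingRelationshipToTeam:
		fmt.Println("matched the team variant")
	default:
		fmt.Println("no single variant matched; payload kept in UnparsedObject")
	}
}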
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_create_request_definition.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_create_request_definition.go
index 4c71e08aa9..3b9c460233 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_create_request_definition.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_create_request_definition.go
@@ -37,7 +37,7 @@ func NewCustomDestinationCreateRequestDefinition(attributes CustomDestinationCre
// but it doesn't guarantee that properties required by API are set.
func NewCustomDestinationCreateRequestDefinitionWithDefaults() *CustomDestinationCreateRequestDefinition {
this := CustomDestinationCreateRequestDefinition{}
- var typeVar CustomDestinationType = CUSTOMDESTINATIONTYPE_custom_destination
+ var typeVar CustomDestinationType = CUSTOMDESTINATIONTYPE_CUSTOM_DESTINATION
this.Type = typeVar
return &this
}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_response_definition.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_response_definition.go
index 37c3ca1e6a..fdab1dec7a 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_response_definition.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_response_definition.go
@@ -27,7 +27,7 @@ type CustomDestinationResponseDefinition struct {
// will change when the set of required properties is changed.
func NewCustomDestinationResponseDefinition() *CustomDestinationResponseDefinition {
this := CustomDestinationResponseDefinition{}
- var typeVar CustomDestinationType = CUSTOMDESTINATIONTYPE_custom_destination
+ var typeVar CustomDestinationType = CUSTOMDESTINATIONTYPE_CUSTOM_DESTINATION
this.Type = &typeVar
return &this
}
@@ -37,7 +37,7 @@ func NewCustomDestinationResponseDefinition() *CustomDestinationResponseDefiniti
// but it doesn't guarantee that properties required by API are set.
func NewCustomDestinationResponseDefinitionWithDefaults() *CustomDestinationResponseDefinition {
this := CustomDestinationResponseDefinition{}
- var typeVar CustomDestinationType = CUSTOMDESTINATIONTYPE_custom_destination
+ var typeVar CustomDestinationType = CUSTOMDESTINATIONTYPE_CUSTOM_DESTINATION
this.Type = &typeVar
return &this
}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_type.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_type.go
index 3edd991f99..ca2bd622fe 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_type.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_type.go
@@ -15,11 +15,11 @@ type CustomDestinationType string
// List of CustomDestinationType.
const (
- CUSTOMDESTINATIONTYPE_custom_destination CustomDestinationType = "custom_destination"
+ CUSTOMDESTINATIONTYPE_CUSTOM_DESTINATION CustomDestinationType = "custom_destination"
)
var allowedCustomDestinationTypeEnumValues = []CustomDestinationType{
- CUSTOMDESTINATIONTYPE_custom_destination,
+ CUSTOMDESTINATIONTYPE_CUSTOM_DESTINATION,
}
// GetAllowedValues returns the list of possible values.
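Note that this is an identifier rename only; the wire value stays the same, but any code in this repository that referenced the old constant name must be updated. A trivial sketch under the assumed github.com import path:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	// Renamed from CUSTOMDESTINATIONTYPE_custom_destination; the serialized value is unchanged.
	t := datadogV2.CUSTOMDESTINATIONTYPE_CUSTOM_DESTINATION
	fmt.Println(string(t)) // custom_destination
}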
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_update_request_definition.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_update_request_definition.go
index e562e1cf61..842cde131b 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_update_request_definition.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_custom_destination_update_request_definition.go
@@ -39,7 +39,7 @@ func NewCustomDestinationUpdateRequestDefinition(id string, typeVar CustomDestin
// but it doesn't guarantee that properties required by API are set.
func NewCustomDestinationUpdateRequestDefinitionWithDefaults() *CustomDestinationUpdateRequestDefinition {
this := CustomDestinationUpdateRequestDefinition{}
- var typeVar CustomDestinationType = CUSTOMDESTINATIONTYPE_custom_destination
+ var typeVar CustomDestinationType = CUSTOMDESTINATIONTYPE_CUSTOM_DESTINATION
this.Type = typeVar
return &this
}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_dora_deployment_request_attributes.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_dora_deployment_request_attributes.go
index 29a04bd3b6..2db091d35a 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_dora_deployment_request_attributes.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_dora_deployment_request_attributes.go
@@ -14,7 +14,7 @@ import (
type DORADeploymentRequestAttributes struct {
// Environment name to where the service was deployed.
Env *string `json:"env,omitempty"`
- // Unix timestamp in nanoseconds when the deployment finished. It should not be older than 3 hours.
+ // Unix timestamp in nanoseconds when the deployment finished. It should not be older than 1 hour.
FinishedAt int64 `json:"finished_at"`
// Git info for DORA Metrics events.
Git *DORAGitInfo `json:"git,omitempty"`
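The doc change above tightens the accepted age of finished_at from 3 hours to 1 hour. A hedged sketch of producing a compliant nanosecond timestamp (other required attributes of the model are not part of this hunk and are omitted):

package main

import (
	"fmt"
	"time"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	// finished_at is a Unix timestamp in nanoseconds and must be less than 1 hour old at submission time.
	finished := time.Now().Add(-5 * time.Minute).UnixNano()

	attrs := datadogV2.DORADeploymentRequestAttributes{
		FinishedAt: finished,
		// service, started_at, and other required fields are omitted here; they are not shown in this hunk.
	}
	fmt.Println(attrs.FinishedAt)
}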
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_logs_query_filter.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_logs_query_filter.go
index c741bcd265..f9c5d60a5a 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_logs_query_filter.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_logs_query_filter.go
@@ -16,7 +16,7 @@ type LogsQueryFilter struct {
Indexes []string `json:"indexes,omitempty"`
// The search query - following the log search syntax.
Query *string `json:"query,omitempty"`
- // Specifies storage type as indexes or online-archives
+ // Specifies storage type as indexes, online-archives or flex
StorageTier *LogsStorageTier `json:"storage_tier,omitempty"`
// The maximum time for the requested logs, supports date math and regular timestamps (milliseconds).
To *string `json:"to,omitempty"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_logs_storage_tier.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_logs_storage_tier.go
index f50513cc1d..f6a81c62ac 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_logs_storage_tier.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_logs_storage_tier.go
@@ -10,18 +10,20 @@ import (
"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
)
-// LogsStorageTier Specifies storage type as indexes or online-archives
+// LogsStorageTier Specifies storage type as indexes, online-archives or flex
type LogsStorageTier string
// List of LogsStorageTier.
const (
LOGSSTORAGETIER_INDEXES LogsStorageTier = "indexes"
LOGSSTORAGETIER_ONLINE_ARCHIVES LogsStorageTier = "online-archives"
+ LOGSSTORAGETIER_FLEX LogsStorageTier = "flex"
)
var allowedLogsStorageTierEnumValues = []LogsStorageTier{
LOGSSTORAGETIER_INDEXES,
LOGSSTORAGETIER_ONLINE_ARCHIVES,
+ LOGSSTORAGETIER_FLEX,
}
// GetAllowedValues returns the list of possible values.
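With the new LOGSSTORAGETIER_FLEX value, a logs search can target the flex tier through LogsQueryFilter. A small sketch, again assuming the canonical github.com import path:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	query := "service:web status:error"
	tier := datadogV2.LOGSSTORAGETIER_FLEX // new storage tier added in this bump

	filter := datadogV2.LogsQueryFilter{
		Query:       &query,
		StorageTier: &tier,
	}
	fmt.Println(*filter.Query, *filter.StorageTier)
}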
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_metric_series.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_metric_series.go
index 10db3ddaac..74b5fe6e67 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_metric_series.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_metric_series.go
@@ -13,7 +13,7 @@ import (
// MetricSeries A metric to submit to Datadog.
// See [Datadog metrics](https://docs.datadoghq.com/developers/metrics/#custom-metrics-properties).
type MetricSeries struct {
- // If the type of the metric is rate or count, define the corresponding interval.
+ // If the type of the metric is rate or count, define the corresponding interval in seconds.
Interval *int64 `json:"interval,omitempty"`
// Metadata for the metric.
Metadata *MetricMetadata `json:"metadata,omitempty"`
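The clarified comment pins the unit: for rate and count metrics the interval is in seconds. A minimal, hedged illustration (the metric name, points, and type required by the model are not part of this hunk and are left out):

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	// A counter flushed every 10 seconds would use an interval of 10.
	interval := int64(10)

	series := datadogV2.MetricSeries{
		Interval: &interval,
		// metric name, points, and type omitted; they are not shown in this hunk.
	}
	fmt.Println(*series.Interval)
}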
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_relationship_to_team.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_relationship_to_team.go
new file mode 100644
index 0000000000..2d1fbea8ef
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_relationship_to_team.go
@@ -0,0 +1,111 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// RelationshipToTeam Relationship to team.
+type RelationshipToTeam struct {
+ // Relationship to Team object.
+ Data *RelationshipToTeamData `json:"data,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewRelationshipToTeam instantiates a new RelationshipToTeam object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewRelationshipToTeam() *RelationshipToTeam {
+ this := RelationshipToTeam{}
+ return &this
+}
+
+// NewRelationshipToTeamWithDefaults instantiates a new RelationshipToTeam object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewRelationshipToTeamWithDefaults() *RelationshipToTeam {
+ this := RelationshipToTeam{}
+ return &this
+}
+
+// GetData returns the Data field value if set, zero value otherwise.
+func (o *RelationshipToTeam) GetData() RelationshipToTeamData {
+ if o == nil || o.Data == nil {
+ var ret RelationshipToTeamData
+ return ret
+ }
+ return *o.Data
+}
+
+// GetDataOk returns a tuple with the Data field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *RelationshipToTeam) GetDataOk() (*RelationshipToTeamData, bool) {
+ if o == nil || o.Data == nil {
+ return nil, false
+ }
+ return o.Data, true
+}
+
+// HasData returns a boolean if a field has been set.
+func (o *RelationshipToTeam) HasData() bool {
+ return o != nil && o.Data != nil
+}
+
+// SetData gets a reference to the given RelationshipToTeamData and assigns it to the Data field.
+func (o *RelationshipToTeam) SetData(v RelationshipToTeamData) {
+ o.Data = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o RelationshipToTeam) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Data != nil {
+ toSerialize["data"] = o.Data
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *RelationshipToTeam) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Data *RelationshipToTeamData `json:"data,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"data"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Data != nil && all.Data.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Data = all.Data
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_relationship_to_team_data.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_relationship_to_team_data.go
new file mode 100644
index 0000000000..d6b2ba6710
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_relationship_to_team_data.go
@@ -0,0 +1,151 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// RelationshipToTeamData Relationship to Team object.
+type RelationshipToTeamData struct {
+ // The unique identifier of the team.
+ Id *string `json:"id,omitempty"`
+ // Team type.
+ Type *TeamType `json:"type,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewRelationshipToTeamData instantiates a new RelationshipToTeamData object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewRelationshipToTeamData() *RelationshipToTeamData {
+ this := RelationshipToTeamData{}
+ var typeVar TeamType = TEAMTYPE_TEAM
+ this.Type = &typeVar
+ return &this
+}
+
+// NewRelationshipToTeamDataWithDefaults instantiates a new RelationshipToTeamData object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewRelationshipToTeamDataWithDefaults() *RelationshipToTeamData {
+ this := RelationshipToTeamData{}
+ var typeVar TeamType = TEAMTYPE_TEAM
+ this.Type = &typeVar
+ return &this
+}
+
+// GetId returns the Id field value if set, zero value otherwise.
+func (o *RelationshipToTeamData) GetId() string {
+ if o == nil || o.Id == nil {
+ var ret string
+ return ret
+ }
+ return *o.Id
+}
+
+// GetIdOk returns a tuple with the Id field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *RelationshipToTeamData) GetIdOk() (*string, bool) {
+ if o == nil || o.Id == nil {
+ return nil, false
+ }
+ return o.Id, true
+}
+
+// HasId returns a boolean if a field has been set.
+func (o *RelationshipToTeamData) HasId() bool {
+ return o != nil && o.Id != nil
+}
+
+// SetId gets a reference to the given string and assigns it to the Id field.
+func (o *RelationshipToTeamData) SetId(v string) {
+ o.Id = &v
+}
+
+// GetType returns the Type field value if set, zero value otherwise.
+func (o *RelationshipToTeamData) GetType() TeamType {
+ if o == nil || o.Type == nil {
+ var ret TeamType
+ return ret
+ }
+ return *o.Type
+}
+
+// GetTypeOk returns a tuple with the Type field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *RelationshipToTeamData) GetTypeOk() (*TeamType, bool) {
+ if o == nil || o.Type == nil {
+ return nil, false
+ }
+ return o.Type, true
+}
+
+// HasType returns a boolean if a field has been set.
+func (o *RelationshipToTeamData) HasType() bool {
+ return o != nil && o.Type != nil
+}
+
+// SetType gets a reference to the given TeamType and assigns it to the Type field.
+func (o *RelationshipToTeamData) SetType(v TeamType) {
+ o.Type = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o RelationshipToTeamData) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Id != nil {
+ toSerialize["id"] = o.Id
+ }
+ if o.Type != nil {
+ toSerialize["type"] = o.Type
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *RelationshipToTeamData) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Id *string `json:"id,omitempty"`
+ Type *TeamType `json:"type,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"id", "type"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ o.Id = all.Id
+ if all.Type != nil && !all.Type.IsValid() {
+ hasInvalidField = true
+ } else {
+ o.Type = all.Type
+ }
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
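These two new models work together: NewRelationshipToTeamData pre-populates the type with TEAMTYPE_TEAM, and the result is wrapped in RelationshipToTeam. A short sketch (the team ID is hypothetical):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	data := datadogV2.NewRelationshipToTeamData() // type defaults to TEAMTYPE_TEAM
	data.SetId("aeadc05e-98a8-11ec-ac2c-da7ad0900002") // hypothetical team ID

	team := datadogV2.NewRelationshipToTeam()
	team.SetData(*data)

	out, err := json.Marshal(team)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // expected roughly: {"data":{"id":"...","type":"team"}}
}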
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_create_response.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_create_response.go
new file mode 100644
index 0000000000..ef9aeada84
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_create_response.go
@@ -0,0 +1,111 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// RetentionFilterCreateResponse The retention filters definition.
+type RetentionFilterCreateResponse struct {
+ // The definition of the retention filter.
+ Data *RetentionFilter `json:"data,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewRetentionFilterCreateResponse instantiates a new RetentionFilterCreateResponse object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewRetentionFilterCreateResponse() *RetentionFilterCreateResponse {
+ this := RetentionFilterCreateResponse{}
+ return &this
+}
+
+// NewRetentionFilterCreateResponseWithDefaults instantiates a new RetentionFilterCreateResponse object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewRetentionFilterCreateResponseWithDefaults() *RetentionFilterCreateResponse {
+ this := RetentionFilterCreateResponse{}
+ return &this
+}
+
+// GetData returns the Data field value if set, zero value otherwise.
+func (o *RetentionFilterCreateResponse) GetData() RetentionFilter {
+ if o == nil || o.Data == nil {
+ var ret RetentionFilter
+ return ret
+ }
+ return *o.Data
+}
+
+// GetDataOk returns a tuple with the Data field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *RetentionFilterCreateResponse) GetDataOk() (*RetentionFilter, bool) {
+ if o == nil || o.Data == nil {
+ return nil, false
+ }
+ return o.Data, true
+}
+
+// HasData returns a boolean if a field has been set.
+func (o *RetentionFilterCreateResponse) HasData() bool {
+ return o != nil && o.Data != nil
+}
+
+// SetData gets a reference to the given RetentionFilter and assigns it to the Data field.
+func (o *RetentionFilterCreateResponse) SetData(v RetentionFilter) {
+ o.Data = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o RetentionFilterCreateResponse) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Data != nil {
+ toSerialize["data"] = o.Data
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *RetentionFilterCreateResponse) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Data *RetentionFilter `json:"data,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"data"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Data != nil && all.Data.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Data = all.Data
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
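The usual Has/Get pattern applies to this new wrapper when decoding a create response. A hedged sketch with an abridged, hypothetical payload:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	body := []byte(`{"data":{"id":"abc-123","type":"apm_retention_filter"}}`) // abridged, hypothetical

	var resp datadogV2.RetentionFilterCreateResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		panic(err)
	}
	if resp.HasData() {
		fmt.Printf("%+v\n", resp.GetData())
	}
}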
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_response.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_response.go
index 3d58bfcac4..e27d23ab37 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_response.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_response.go
@@ -11,7 +11,7 @@ import (
// RetentionFilterResponse The retention filters definition.
type RetentionFilterResponse struct {
// The definition of the retention filter.
- Data *RetentionFilter `json:"data,omitempty"`
+ Data *RetentionFilterAll `json:"data,omitempty"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject map[string]interface{} `json:"-"`
AdditionalProperties map[string]interface{}
@@ -35,9 +35,9 @@ func NewRetentionFilterResponseWithDefaults() *RetentionFilterResponse {
}
// GetData returns the Data field value if set, zero value otherwise.
-func (o *RetentionFilterResponse) GetData() RetentionFilter {
+func (o *RetentionFilterResponse) GetData() RetentionFilterAll {
if o == nil || o.Data == nil {
- var ret RetentionFilter
+ var ret RetentionFilterAll
return ret
}
return *o.Data
@@ -45,7 +45,7 @@ func (o *RetentionFilterResponse) GetData() RetentionFilter {
// GetDataOk returns a tuple with the Data field value if set, nil otherwise
// and a boolean to check if the value has been set.
-func (o *RetentionFilterResponse) GetDataOk() (*RetentionFilter, bool) {
+func (o *RetentionFilterResponse) GetDataOk() (*RetentionFilterAll, bool) {
if o == nil || o.Data == nil {
return nil, false
}
@@ -57,8 +57,8 @@ func (o *RetentionFilterResponse) HasData() bool {
return o != nil && o.Data != nil
}
-// SetData gets a reference to the given RetentionFilter and assigns it to the Data field.
-func (o *RetentionFilterResponse) SetData(v RetentionFilter) {
+// SetData gets a reference to the given RetentionFilterAll and assigns it to the Data field.
+func (o *RetentionFilterResponse) SetData(v RetentionFilterAll) {
o.Data = &v
}
@@ -81,7 +81,7 @@ func (o RetentionFilterResponse) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the given payload.
func (o *RetentionFilterResponse) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- Data *RetentionFilter `json:"data,omitempty"`
+ Data *RetentionFilterAll `json:"data,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_update_attributes.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_update_attributes.go
new file mode 100644
index 0000000000..9ccb0d9e9a
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_update_attributes.go
@@ -0,0 +1,245 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// RetentionFilterUpdateAttributes The object describing the configuration of the retention filter to create/update.
+type RetentionFilterUpdateAttributes struct {
+ // Enable/Disable the retention filter.
+ Enabled bool `json:"enabled"`
+ // The spans filter. Spans matching this filter will be indexed and stored.
+ Filter SpansFilterCreate `json:"filter"`
+ // The type of retention filter.
+ FilterType RetentionFilterAllType `json:"filter_type"`
+ // The name of the retention filter.
+ Name string `json:"name"`
+ // Sample rate to apply to spans going through this retention filter,
+ // a value of 1.0 keeps all spans matching the query.
+ Rate float64 `json:"rate"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewRetentionFilterUpdateAttributes instantiates a new RetentionFilterUpdateAttributes object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewRetentionFilterUpdateAttributes(enabled bool, filter SpansFilterCreate, filterType RetentionFilterAllType, name string, rate float64) *RetentionFilterUpdateAttributes {
+ this := RetentionFilterUpdateAttributes{}
+ this.Enabled = enabled
+ this.Filter = filter
+ this.FilterType = filterType
+ this.Name = name
+ this.Rate = rate
+ return &this
+}
+
+// NewRetentionFilterUpdateAttributesWithDefaults instantiates a new RetentionFilterUpdateAttributes object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewRetentionFilterUpdateAttributesWithDefaults() *RetentionFilterUpdateAttributes {
+ this := RetentionFilterUpdateAttributes{}
+ var filterType RetentionFilterAllType = RETENTIONFILTERALLTYPE_SPANS_SAMPLING_PROCESSOR
+ this.FilterType = filterType
+ return &this
+}
+
+// GetEnabled returns the Enabled field value.
+func (o *RetentionFilterUpdateAttributes) GetEnabled() bool {
+ if o == nil {
+ var ret bool
+ return ret
+ }
+ return o.Enabled
+}
+
+// GetEnabledOk returns a tuple with the Enabled field value
+// and a boolean to check if the value has been set.
+func (o *RetentionFilterUpdateAttributes) GetEnabledOk() (*bool, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Enabled, true
+}
+
+// SetEnabled sets field value.
+func (o *RetentionFilterUpdateAttributes) SetEnabled(v bool) {
+ o.Enabled = v
+}
+
+// GetFilter returns the Filter field value.
+func (o *RetentionFilterUpdateAttributes) GetFilter() SpansFilterCreate {
+ if o == nil {
+ var ret SpansFilterCreate
+ return ret
+ }
+ return o.Filter
+}
+
+// GetFilterOk returns a tuple with the Filter field value
+// and a boolean to check if the value has been set.
+func (o *RetentionFilterUpdateAttributes) GetFilterOk() (*SpansFilterCreate, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Filter, true
+}
+
+// SetFilter sets field value.
+func (o *RetentionFilterUpdateAttributes) SetFilter(v SpansFilterCreate) {
+ o.Filter = v
+}
+
+// GetFilterType returns the FilterType field value.
+func (o *RetentionFilterUpdateAttributes) GetFilterType() RetentionFilterAllType {
+ if o == nil {
+ var ret RetentionFilterAllType
+ return ret
+ }
+ return o.FilterType
+}
+
+// GetFilterTypeOk returns a tuple with the FilterType field value
+// and a boolean to check if the value has been set.
+func (o *RetentionFilterUpdateAttributes) GetFilterTypeOk() (*RetentionFilterAllType, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.FilterType, true
+}
+
+// SetFilterType sets field value.
+func (o *RetentionFilterUpdateAttributes) SetFilterType(v RetentionFilterAllType) {
+ o.FilterType = v
+}
+
+// GetName returns the Name field value.
+func (o *RetentionFilterUpdateAttributes) GetName() string {
+ if o == nil {
+ var ret string
+ return ret
+ }
+ return o.Name
+}
+
+// GetNameOk returns a tuple with the Name field value
+// and a boolean to check if the value has been set.
+func (o *RetentionFilterUpdateAttributes) GetNameOk() (*string, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Name, true
+}
+
+// SetName sets field value.
+func (o *RetentionFilterUpdateAttributes) SetName(v string) {
+ o.Name = v
+}
+
+// GetRate returns the Rate field value.
+func (o *RetentionFilterUpdateAttributes) GetRate() float64 {
+ if o == nil {
+ var ret float64
+ return ret
+ }
+ return o.Rate
+}
+
+// GetRateOk returns a tuple with the Rate field value
+// and a boolean to check if the value has been set.
+func (o *RetentionFilterUpdateAttributes) GetRateOk() (*float64, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Rate, true
+}
+
+// SetRate sets field value.
+func (o *RetentionFilterUpdateAttributes) SetRate(v float64) {
+ o.Rate = v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o RetentionFilterUpdateAttributes) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ toSerialize["enabled"] = o.Enabled
+ toSerialize["filter"] = o.Filter
+ toSerialize["filter_type"] = o.FilterType
+ toSerialize["name"] = o.Name
+ toSerialize["rate"] = o.Rate
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *RetentionFilterUpdateAttributes) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Enabled *bool `json:"enabled"`
+ Filter *SpansFilterCreate `json:"filter"`
+ FilterType *RetentionFilterAllType `json:"filter_type"`
+ Name *string `json:"name"`
+ Rate *float64 `json:"rate"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ if all.Enabled == nil {
+ return fmt.Errorf("required field enabled missing")
+ }
+ if all.Filter == nil {
+ return fmt.Errorf("required field filter missing")
+ }
+ if all.FilterType == nil {
+ return fmt.Errorf("required field filter_type missing")
+ }
+ if all.Name == nil {
+ return fmt.Errorf("required field name missing")
+ }
+ if all.Rate == nil {
+ return fmt.Errorf("required field rate missing")
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"enabled", "filter", "filter_type", "name", "rate"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ o.Enabled = *all.Enabled
+ if all.Filter.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Filter = *all.Filter
+ if !all.FilterType.IsValid() {
+ hasInvalidField = true
+ } else {
+ o.FilterType = *all.FilterType
+ }
+ o.Name = *all.Name
+ o.Rate = *all.Rate
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
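This new attributes model replaces RetentionFilterCreateAttributes in RetentionFilterUpdateData (see the next hunk), and all five fields are required by its constructor. A hedged sketch of building it; the spans filter content comes from SpansFilterCreate, which is not part of this diff, so a zero value stands in here:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	filter := datadogV2.SpansFilterCreate{} // populate the spans query per the SpansFilterCreate model (not shown in this diff)

	attrs := datadogV2.NewRetentionFilterUpdateAttributes(
		true,   // enabled
		filter, // filter
		datadogV2.RETENTIONFILTERALLTYPE_SPANS_SAMPLING_PROCESSOR, // filter_type
		"my-retention-filter", // name
		1.0,                   // rate: 1.0 keeps all spans matching the query
	)
	fmt.Println(attrs.GetName(), attrs.GetRate())
}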
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_update_data.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_update_data.go
index db33856dcd..c3e8ed2747 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_update_data.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_retention_filter_update_data.go
@@ -13,7 +13,7 @@ import (
// RetentionFilterUpdateData The body of the retention filter to be updated.
type RetentionFilterUpdateData struct {
// The object describing the configuration of the retention filter to create/update.
- Attributes RetentionFilterCreateAttributes `json:"attributes"`
+ Attributes RetentionFilterUpdateAttributes `json:"attributes"`
// The ID of the retention filter.
Id string `json:"id"`
// The type of the resource.
@@ -27,7 +27,7 @@ type RetentionFilterUpdateData struct {
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed.
-func NewRetentionFilterUpdateData(attributes RetentionFilterCreateAttributes, id string, typeVar ApmRetentionFilterType) *RetentionFilterUpdateData {
+func NewRetentionFilterUpdateData(attributes RetentionFilterUpdateAttributes, id string, typeVar ApmRetentionFilterType) *RetentionFilterUpdateData {
this := RetentionFilterUpdateData{}
this.Attributes = attributes
this.Id = id
@@ -46,9 +46,9 @@ func NewRetentionFilterUpdateDataWithDefaults() *RetentionFilterUpdateData {
}
// GetAttributes returns the Attributes field value.
-func (o *RetentionFilterUpdateData) GetAttributes() RetentionFilterCreateAttributes {
+func (o *RetentionFilterUpdateData) GetAttributes() RetentionFilterUpdateAttributes {
if o == nil {
- var ret RetentionFilterCreateAttributes
+ var ret RetentionFilterUpdateAttributes
return ret
}
return o.Attributes
@@ -56,7 +56,7 @@ func (o *RetentionFilterUpdateData) GetAttributes() RetentionFilterCreateAttribu
// GetAttributesOk returns a tuple with the Attributes field value
// and a boolean to check if the value has been set.
-func (o *RetentionFilterUpdateData) GetAttributesOk() (*RetentionFilterCreateAttributes, bool) {
+func (o *RetentionFilterUpdateData) GetAttributesOk() (*RetentionFilterUpdateAttributes, bool) {
if o == nil {
return nil, false
}
@@ -64,7 +64,7 @@ func (o *RetentionFilterUpdateData) GetAttributesOk() (*RetentionFilterCreateAtt
}
// SetAttributes sets field value.
-func (o *RetentionFilterUpdateData) SetAttributes(v RetentionFilterCreateAttributes) {
+func (o *RetentionFilterUpdateData) SetAttributes(v RetentionFilterUpdateAttributes) {
o.Attributes = v
}
@@ -133,7 +133,7 @@ func (o RetentionFilterUpdateData) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the given payload.
func (o *RetentionFilterUpdateData) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- Attributes *RetentionFilterCreateAttributes `json:"attributes"`
+ Attributes *RetentionFilterUpdateAttributes `json:"attributes"`
Id *string `json:"id"`
Type *ApmRetentionFilterType `json:"type"`
}{}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_rule_update_payload.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_rule_update_payload.go
index b434da4571..51089686ea 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_rule_update_payload.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_rule_update_payload.go
@@ -14,7 +14,7 @@ type SecurityMonitoringRuleUpdatePayload struct {
Cases []SecurityMonitoringRuleCase `json:"cases,omitempty"`
// How to generate compliance signals. Useful for cloud_configuration rules only.
ComplianceSignalOptions *CloudConfigurationRuleComplianceSignalOptions `json:"complianceSignalOptions,omitempty"`
- // Additional queries to filter matched events before they are processed.
+ // Additional queries to filter matched events before they are processed. This field is deprecated for log detection, signal correlation, and workload security rules.
Filters []SecurityMonitoringFilter `json:"filters,omitempty"`
// Whether the notifications include the triggering group-by values in their title.
HasExtendedTitle *bool `json:"hasExtendedTitle,omitempty"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_signal_rule_create_payload.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_signal_rule_create_payload.go
index b59b48c7de..68780c42ed 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_signal_rule_create_payload.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_signal_rule_create_payload.go
@@ -14,7 +14,7 @@ import (
type SecurityMonitoringSignalRuleCreatePayload struct {
// Cases for generating signals.
Cases []SecurityMonitoringRuleCaseCreate `json:"cases"`
- // Additional queries to filter matched events before they are processed.
+ // Additional queries to filter matched events before they are processed. This field is deprecated for log detection, signal correlation, and workload security rules.
Filters []SecurityMonitoringFilter `json:"filters,omitempty"`
// Whether the notifications include the triggering group-by values in their title.
HasExtendedTitle *bool `json:"hasExtendedTitle,omitempty"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_signal_rule_response.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_signal_rule_response.go
index ba51b35e70..277934cee9 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_signal_rule_response.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_signal_rule_response.go
@@ -18,7 +18,7 @@ type SecurityMonitoringSignalRuleResponse struct {
CreationAuthorId *int64 `json:"creationAuthorId,omitempty"`
// When the rule will be deprecated, timestamp in milliseconds.
DeprecationDate *int64 `json:"deprecationDate,omitempty"`
- // Additional queries to filter matched events before they are processed.
+ // Additional queries to filter matched events before they are processed. This field is deprecated for log detection, signal correlation, and workload security rules.
Filters []SecurityMonitoringFilter `json:"filters,omitempty"`
// Whether the notifications include the triggering group-by values in their title.
HasExtendedTitle *bool `json:"hasExtendedTitle,omitempty"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_standard_rule_create_payload.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_standard_rule_create_payload.go
index 33d8536fff..83c2c1f6f8 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_standard_rule_create_payload.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_standard_rule_create_payload.go
@@ -14,7 +14,7 @@ import (
type SecurityMonitoringStandardRuleCreatePayload struct {
// Cases for generating signals.
Cases []SecurityMonitoringRuleCaseCreate `json:"cases"`
- // Additional queries to filter matched events before they are processed.
+ // Additional queries to filter matched events before they are processed. This field is deprecated for log detection, signal correlation, and workload security rules.
Filters []SecurityMonitoringFilter `json:"filters,omitempty"`
// Whether the notifications include the triggering group-by values in their title.
HasExtendedTitle *bool `json:"hasExtendedTitle,omitempty"`
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_standard_rule_response.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_standard_rule_response.go
index 117ec1f9e1..3a2214350c 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_standard_rule_response.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_standard_rule_response.go
@@ -18,9 +18,11 @@ type SecurityMonitoringStandardRuleResponse struct {
CreatedAt *int64 `json:"createdAt,omitempty"`
// User ID of the user who created the rule.
CreationAuthorId *int64 `json:"creationAuthorId,omitempty"`
+ // Default tags for default rules (included in tags).
+ DefaultTags []string `json:"defaultTags,omitempty"`
// When the rule will be deprecated, timestamp in milliseconds.
DeprecationDate *int64 `json:"deprecationDate,omitempty"`
- // Additional queries to filter matched events before they are processed.
+ // Additional queries to filter matched events before they are processed. This field is deprecated for log detection, signal correlation, and workload security rules.
Filters []SecurityMonitoringFilter `json:"filters,omitempty"`
// Whether the notifications include the triggering group-by values in their title.
HasExtendedTitle *bool `json:"hasExtendedTitle,omitempty"`
@@ -184,6 +186,34 @@ func (o *SecurityMonitoringStandardRuleResponse) SetCreationAuthorId(v int64) {
o.CreationAuthorId = &v
}
+// GetDefaultTags returns the DefaultTags field value if set, zero value otherwise.
+func (o *SecurityMonitoringStandardRuleResponse) GetDefaultTags() []string {
+ if o == nil || o.DefaultTags == nil {
+ var ret []string
+ return ret
+ }
+ return o.DefaultTags
+}
+
+// GetDefaultTagsOk returns a tuple with the DefaultTags field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SecurityMonitoringStandardRuleResponse) GetDefaultTagsOk() (*[]string, bool) {
+ if o == nil || o.DefaultTags == nil {
+ return nil, false
+ }
+ return &o.DefaultTags, true
+}
+
+// HasDefaultTags returns a boolean if a field has been set.
+func (o *SecurityMonitoringStandardRuleResponse) HasDefaultTags() bool {
+ return o != nil && o.DefaultTags != nil
+}
+
+// SetDefaultTags gets a reference to the given []string and assigns it to the DefaultTags field.
+func (o *SecurityMonitoringStandardRuleResponse) SetDefaultTags(v []string) {
+ o.DefaultTags = v
+}
+
// GetDeprecationDate returns the DeprecationDate field value if set, zero value otherwise.
func (o *SecurityMonitoringStandardRuleResponse) GetDeprecationDate() int64 {
if o == nil || o.DeprecationDate == nil {
@@ -650,6 +680,9 @@ func (o SecurityMonitoringStandardRuleResponse) MarshalJSON() ([]byte, error) {
if o.CreationAuthorId != nil {
toSerialize["creationAuthorId"] = o.CreationAuthorId
}
+ if o.DefaultTags != nil {
+ toSerialize["defaultTags"] = o.DefaultTags
+ }
if o.DeprecationDate != nil {
toSerialize["deprecationDate"] = o.DeprecationDate
}
@@ -712,6 +745,7 @@ func (o *SecurityMonitoringStandardRuleResponse) UnmarshalJSON(bytes []byte) (er
ComplianceSignalOptions *CloudConfigurationRuleComplianceSignalOptions `json:"complianceSignalOptions,omitempty"`
CreatedAt *int64 `json:"createdAt,omitempty"`
CreationAuthorId *int64 `json:"creationAuthorId,omitempty"`
+ DefaultTags []string `json:"defaultTags,omitempty"`
DeprecationDate *int64 `json:"deprecationDate,omitempty"`
Filters []SecurityMonitoringFilter `json:"filters,omitempty"`
HasExtendedTitle *bool `json:"hasExtendedTitle,omitempty"`
@@ -734,7 +768,7 @@ func (o *SecurityMonitoringStandardRuleResponse) UnmarshalJSON(bytes []byte) (er
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"cases", "complianceSignalOptions", "createdAt", "creationAuthorId", "deprecationDate", "filters", "hasExtendedTitle", "id", "isDefault", "isDeleted", "isEnabled", "message", "name", "options", "queries", "tags", "thirdPartyCases", "type", "updateAuthorId", "version"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"cases", "complianceSignalOptions", "createdAt", "creationAuthorId", "defaultTags", "deprecationDate", "filters", "hasExtendedTitle", "id", "isDefault", "isDeleted", "isEnabled", "message", "name", "options", "queries", "tags", "thirdPartyCases", "type", "updateAuthorId", "version"})
} else {
return err
}
@@ -747,6 +781,7 @@ func (o *SecurityMonitoringStandardRuleResponse) UnmarshalJSON(bytes []byte) (er
o.ComplianceSignalOptions = all.ComplianceSignalOptions
o.CreatedAt = all.CreatedAt
o.CreationAuthorId = all.CreationAuthorId
+ o.DefaultTags = all.DefaultTags
o.DeprecationDate = all.DeprecationDate
o.Filters = all.Filters
o.HasExtendedTitle = all.HasExtendedTitle
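A minimal sketch of the new accessor pair on the rule response model (the tag values below are invented for illustration); defaultTags behaves like any other optional slice field:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	var rule datadogV2.SecurityMonitoringStandardRuleResponse
	// Hypothetical tags; real values are returned by the Security Monitoring API.
	rule.SetDefaultTags([]string{"security:attack", "technique:T1110-brute-force"})
	if tags, ok := rule.GetDefaultTagsOk(); ok {
		fmt.Println("default tags:", *tags)
	}
}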
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_attributes.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_attributes.go
index 14b93354ce..5f0ca86b8d 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_attributes.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_attributes.go
@@ -14,6 +14,8 @@ type SecurityMonitoringSuppressionAttributes struct {
CreationDate *int64 `json:"creation_date,omitempty"`
// A user.
Creator *SecurityMonitoringUser `json:"creator,omitempty"`
+ // An exclusion query on the input data of the security rules, which could be logs, Agent events, or other types of data based on the security rule. Events matching this query are ignored by any detection rules referenced in the suppression rule.
+ DataExclusionQuery *string `json:"data_exclusion_query,omitempty"`
// A description for the suppression rule.
Description *string `json:"description,omitempty"`
// Whether the suppression rule is enabled.
@@ -110,6 +112,34 @@ func (o *SecurityMonitoringSuppressionAttributes) SetCreator(v SecurityMonitorin
o.Creator = &v
}
+// GetDataExclusionQuery returns the DataExclusionQuery field value if set, zero value otherwise.
+func (o *SecurityMonitoringSuppressionAttributes) GetDataExclusionQuery() string {
+ if o == nil || o.DataExclusionQuery == nil {
+ var ret string
+ return ret
+ }
+ return *o.DataExclusionQuery
+}
+
+// GetDataExclusionQueryOk returns a tuple with the DataExclusionQuery field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SecurityMonitoringSuppressionAttributes) GetDataExclusionQueryOk() (*string, bool) {
+ if o == nil || o.DataExclusionQuery == nil {
+ return nil, false
+ }
+ return o.DataExclusionQuery, true
+}
+
+// HasDataExclusionQuery returns a boolean if a field has been set.
+func (o *SecurityMonitoringSuppressionAttributes) HasDataExclusionQuery() bool {
+ return o != nil && o.DataExclusionQuery != nil
+}
+
+// SetDataExclusionQuery gets a reference to the given string and assigns it to the DataExclusionQuery field.
+func (o *SecurityMonitoringSuppressionAttributes) SetDataExclusionQuery(v string) {
+ o.DataExclusionQuery = &v
+}
+
// GetDescription returns the Description field value if set, zero value otherwise.
func (o *SecurityMonitoringSuppressionAttributes) GetDescription() string {
if o == nil || o.Description == nil {
@@ -374,6 +404,9 @@ func (o SecurityMonitoringSuppressionAttributes) MarshalJSON() ([]byte, error) {
if o.Creator != nil {
toSerialize["creator"] = o.Creator
}
+ if o.DataExclusionQuery != nil {
+ toSerialize["data_exclusion_query"] = o.DataExclusionQuery
+ }
if o.Description != nil {
toSerialize["description"] = o.Description
}
@@ -411,24 +444,25 @@ func (o SecurityMonitoringSuppressionAttributes) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the given payload.
func (o *SecurityMonitoringSuppressionAttributes) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- CreationDate *int64 `json:"creation_date,omitempty"`
- Creator *SecurityMonitoringUser `json:"creator,omitempty"`
- Description *string `json:"description,omitempty"`
- Enabled *bool `json:"enabled,omitempty"`
- ExpirationDate *int64 `json:"expiration_date,omitempty"`
- Name *string `json:"name,omitempty"`
- RuleQuery *string `json:"rule_query,omitempty"`
- SuppressionQuery *string `json:"suppression_query,omitempty"`
- UpdateDate *int64 `json:"update_date,omitempty"`
- Updater *SecurityMonitoringUser `json:"updater,omitempty"`
- Version *int32 `json:"version,omitempty"`
+ CreationDate *int64 `json:"creation_date,omitempty"`
+ Creator *SecurityMonitoringUser `json:"creator,omitempty"`
+ DataExclusionQuery *string `json:"data_exclusion_query,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
+ ExpirationDate *int64 `json:"expiration_date,omitempty"`
+ Name *string `json:"name,omitempty"`
+ RuleQuery *string `json:"rule_query,omitempty"`
+ SuppressionQuery *string `json:"suppression_query,omitempty"`
+ UpdateDate *int64 `json:"update_date,omitempty"`
+ Updater *SecurityMonitoringUser `json:"updater,omitempty"`
+ Version *int32 `json:"version,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"creation_date", "creator", "description", "enabled", "expiration_date", "name", "rule_query", "suppression_query", "update_date", "updater", "version"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"creation_date", "creator", "data_exclusion_query", "description", "enabled", "expiration_date", "name", "rule_query", "suppression_query", "update_date", "updater", "version"})
} else {
return err
}
@@ -439,6 +473,7 @@ func (o *SecurityMonitoringSuppressionAttributes) UnmarshalJSON(bytes []byte) (e
hasInvalidField = true
}
o.Creator = all.Creator
+ o.DataExclusionQuery = all.DataExclusionQuery
o.Description = all.Description
o.Enabled = all.Enabled
o.ExpirationDate = all.ExpirationDate
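A minimal sketch of reading the new data_exclusion_query field from a deserialized suppression rule; the payload below is hypothetical and only exercises the new accessor:

package main

import (
	"fmt"
	"log"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	// Hypothetical attributes payload used only to exercise the new field.
	payload := []byte(`{"name":"Ignore staging hosts","data_exclusion_query":"env:staging"}`)

	var attrs datadogV2.SecurityMonitoringSuppressionAttributes
	if err := attrs.UnmarshalJSON(payload); err != nil {
		log.Fatal(err)
	}
	if q, ok := attrs.GetDataExclusionQueryOk(); ok {
		fmt.Println("data exclusion query:", *q)
	}
}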
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_create_attributes.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_create_attributes.go
index 9c566a849d..35ffa78fe8 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_create_attributes.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_create_attributes.go
@@ -12,6 +12,8 @@ import (
// SecurityMonitoringSuppressionCreateAttributes Object containing the attributes of the suppression rule to be created.
type SecurityMonitoringSuppressionCreateAttributes struct {
+ // An exclusion query on the input data of the security rules, which could be logs, Agent events, or other types of data based on the security rule. Events matching this query are ignored by any detection rules referenced in the suppression rule.
+ DataExclusionQuery *string `json:"data_exclusion_query,omitempty"`
// A description for the suppression rule.
Description *string `json:"description,omitempty"`
// Whether the suppression rule is enabled.
@@ -22,8 +24,8 @@ type SecurityMonitoringSuppressionCreateAttributes struct {
Name string `json:"name"`
// The rule query of the suppression rule, with the same syntax as the search bar for detection rules.
RuleQuery string `json:"rule_query"`
- // The suppression query of the suppression rule. If a signal matches this query, it is suppressed and is not triggered . Same syntax as the queries to search signals in the signal explorer.
- SuppressionQuery string `json:"suppression_query"`
+ // The suppression query of the suppression rule. If a signal matches this query, it is suppressed and is not triggered. It uses the same syntax as the queries to search signals in the Signals Explorer.
+ SuppressionQuery *string `json:"suppression_query,omitempty"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject map[string]interface{} `json:"-"`
AdditionalProperties map[string]interface{}
@@ -33,12 +35,11 @@ type SecurityMonitoringSuppressionCreateAttributes struct {
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed.
-func NewSecurityMonitoringSuppressionCreateAttributes(enabled bool, name string, ruleQuery string, suppressionQuery string) *SecurityMonitoringSuppressionCreateAttributes {
+func NewSecurityMonitoringSuppressionCreateAttributes(enabled bool, name string, ruleQuery string) *SecurityMonitoringSuppressionCreateAttributes {
this := SecurityMonitoringSuppressionCreateAttributes{}
this.Enabled = enabled
this.Name = name
this.RuleQuery = ruleQuery
- this.SuppressionQuery = suppressionQuery
return &this
}
@@ -50,6 +51,34 @@ func NewSecurityMonitoringSuppressionCreateAttributesWithDefaults() *SecurityMon
return &this
}
+// GetDataExclusionQuery returns the DataExclusionQuery field value if set, zero value otherwise.
+func (o *SecurityMonitoringSuppressionCreateAttributes) GetDataExclusionQuery() string {
+ if o == nil || o.DataExclusionQuery == nil {
+ var ret string
+ return ret
+ }
+ return *o.DataExclusionQuery
+}
+
+// GetDataExclusionQueryOk returns a tuple with the DataExclusionQuery field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SecurityMonitoringSuppressionCreateAttributes) GetDataExclusionQueryOk() (*string, bool) {
+ if o == nil || o.DataExclusionQuery == nil {
+ return nil, false
+ }
+ return o.DataExclusionQuery, true
+}
+
+// HasDataExclusionQuery returns a boolean if a field has been set.
+func (o *SecurityMonitoringSuppressionCreateAttributes) HasDataExclusionQuery() bool {
+ return o != nil && o.DataExclusionQuery != nil
+}
+
+// SetDataExclusionQuery gets a reference to the given string and assigns it to the DataExclusionQuery field.
+func (o *SecurityMonitoringSuppressionCreateAttributes) SetDataExclusionQuery(v string) {
+ o.DataExclusionQuery = &v
+}
+
// GetDescription returns the Description field value if set, zero value otherwise.
func (o *SecurityMonitoringSuppressionCreateAttributes) GetDescription() string {
if o == nil || o.Description == nil {
@@ -175,27 +204,32 @@ func (o *SecurityMonitoringSuppressionCreateAttributes) SetRuleQuery(v string) {
o.RuleQuery = v
}
-// GetSuppressionQuery returns the SuppressionQuery field value.
+// GetSuppressionQuery returns the SuppressionQuery field value if set, zero value otherwise.
func (o *SecurityMonitoringSuppressionCreateAttributes) GetSuppressionQuery() string {
- if o == nil {
+ if o == nil || o.SuppressionQuery == nil {
var ret string
return ret
}
- return o.SuppressionQuery
+ return *o.SuppressionQuery
}
-// GetSuppressionQueryOk returns a tuple with the SuppressionQuery field value
+// GetSuppressionQueryOk returns a tuple with the SuppressionQuery field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *SecurityMonitoringSuppressionCreateAttributes) GetSuppressionQueryOk() (*string, bool) {
- if o == nil {
+ if o == nil || o.SuppressionQuery == nil {
return nil, false
}
- return &o.SuppressionQuery, true
+ return o.SuppressionQuery, true
}
-// SetSuppressionQuery sets field value.
+// HasSuppressionQuery returns a boolean if a field has been set.
+func (o *SecurityMonitoringSuppressionCreateAttributes) HasSuppressionQuery() bool {
+ return o != nil && o.SuppressionQuery != nil
+}
+
+// SetSuppressionQuery gets a reference to the given string and assigns it to the SuppressionQuery field.
func (o *SecurityMonitoringSuppressionCreateAttributes) SetSuppressionQuery(v string) {
- o.SuppressionQuery = v
+ o.SuppressionQuery = &v
}
// MarshalJSON serializes the struct using spec logic.
@@ -204,6 +238,9 @@ func (o SecurityMonitoringSuppressionCreateAttributes) MarshalJSON() ([]byte, er
if o.UnparsedObject != nil {
return datadog.Marshal(o.UnparsedObject)
}
+ if o.DataExclusionQuery != nil {
+ toSerialize["data_exclusion_query"] = o.DataExclusionQuery
+ }
if o.Description != nil {
toSerialize["description"] = o.Description
}
@@ -213,7 +250,9 @@ func (o SecurityMonitoringSuppressionCreateAttributes) MarshalJSON() ([]byte, er
}
toSerialize["name"] = o.Name
toSerialize["rule_query"] = o.RuleQuery
- toSerialize["suppression_query"] = o.SuppressionQuery
+ if o.SuppressionQuery != nil {
+ toSerialize["suppression_query"] = o.SuppressionQuery
+ }
for key, value := range o.AdditionalProperties {
toSerialize[key] = value
@@ -224,12 +263,13 @@ func (o SecurityMonitoringSuppressionCreateAttributes) MarshalJSON() ([]byte, er
// UnmarshalJSON deserializes the given payload.
func (o *SecurityMonitoringSuppressionCreateAttributes) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- Description *string `json:"description,omitempty"`
- Enabled *bool `json:"enabled"`
- ExpirationDate *int64 `json:"expiration_date,omitempty"`
- Name *string `json:"name"`
- RuleQuery *string `json:"rule_query"`
- SuppressionQuery *string `json:"suppression_query"`
+ DataExclusionQuery *string `json:"data_exclusion_query,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Enabled *bool `json:"enabled"`
+ ExpirationDate *int64 `json:"expiration_date,omitempty"`
+ Name *string `json:"name"`
+ RuleQuery *string `json:"rule_query"`
+ SuppressionQuery *string `json:"suppression_query,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
@@ -243,21 +283,19 @@ func (o *SecurityMonitoringSuppressionCreateAttributes) UnmarshalJSON(bytes []by
if all.RuleQuery == nil {
return fmt.Errorf("required field rule_query missing")
}
- if all.SuppressionQuery == nil {
- return fmt.Errorf("required field suppression_query missing")
- }
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"description", "enabled", "expiration_date", "name", "rule_query", "suppression_query"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"data_exclusion_query", "description", "enabled", "expiration_date", "name", "rule_query", "suppression_query"})
} else {
return err
}
+ o.DataExclusionQuery = all.DataExclusionQuery
o.Description = all.Description
o.Enabled = *all.Enabled
o.ExpirationDate = all.ExpirationDate
o.Name = *all.Name
o.RuleQuery = *all.RuleQuery
- o.SuppressionQuery = *all.SuppressionQuery
+ o.SuppressionQuery = all.SuppressionQuery
if len(additionalProperties) > 0 {
o.AdditionalProperties = additionalProperties
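Because suppression_query is now optional, the constructor drops its suppressionQuery argument; callers set either a suppression query or the new data exclusion query through the setters. A minimal sketch with hypothetical rule names and queries:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	// The constructor now takes only enabled, name, and ruleQuery.
	attrs := datadogV2.NewSecurityMonitoringSuppressionCreateAttributes(true, "Ignore CI hosts", "type:log_detection source:runtime")
	attrs.SetDataExclusionQuery("host:ci-*")
	// A signal-based suppression could instead call attrs.SetSuppressionQuery("env:staging").

	body, _ := attrs.MarshalJSON()
	fmt.Println(string(body))
}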
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_update_attributes.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_update_attributes.go
index eddd00b382..829286696a 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_update_attributes.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_security_monitoring_suppression_update_attributes.go
@@ -10,6 +10,8 @@ import (
// SecurityMonitoringSuppressionUpdateAttributes The suppression rule properties to be updated.
type SecurityMonitoringSuppressionUpdateAttributes struct {
+ // An exclusion query on the input data of the security rules, which could be logs, Agent events, or other types of data based on the security rule. Events matching this query are ignored by any detection rules referenced in the suppression rule.
+ DataExclusionQuery *string `json:"data_exclusion_query,omitempty"`
// A description for the suppression rule.
Description *string `json:"description,omitempty"`
// Whether the suppression rule is enabled.
@@ -46,6 +48,34 @@ func NewSecurityMonitoringSuppressionUpdateAttributesWithDefaults() *SecurityMon
return &this
}
+// GetDataExclusionQuery returns the DataExclusionQuery field value if set, zero value otherwise.
+func (o *SecurityMonitoringSuppressionUpdateAttributes) GetDataExclusionQuery() string {
+ if o == nil || o.DataExclusionQuery == nil {
+ var ret string
+ return ret
+ }
+ return *o.DataExclusionQuery
+}
+
+// GetDataExclusionQueryOk returns a tuple with the DataExclusionQuery field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SecurityMonitoringSuppressionUpdateAttributes) GetDataExclusionQueryOk() (*string, bool) {
+ if o == nil || o.DataExclusionQuery == nil {
+ return nil, false
+ }
+ return o.DataExclusionQuery, true
+}
+
+// HasDataExclusionQuery returns a boolean if a field has been set.
+func (o *SecurityMonitoringSuppressionUpdateAttributes) HasDataExclusionQuery() bool {
+ return o != nil && o.DataExclusionQuery != nil
+}
+
+// SetDataExclusionQuery gets a reference to the given string and assigns it to the DataExclusionQuery field.
+func (o *SecurityMonitoringSuppressionUpdateAttributes) SetDataExclusionQuery(v string) {
+ o.DataExclusionQuery = &v
+}
+
// GetDescription returns the Description field value if set, zero value otherwise.
func (o *SecurityMonitoringSuppressionUpdateAttributes) GetDescription() string {
if o == nil || o.Description == nil {
@@ -259,6 +289,9 @@ func (o SecurityMonitoringSuppressionUpdateAttributes) MarshalJSON() ([]byte, er
if o.UnparsedObject != nil {
return datadog.Marshal(o.UnparsedObject)
}
+ if o.DataExclusionQuery != nil {
+ toSerialize["data_exclusion_query"] = o.DataExclusionQuery
+ }
if o.Description != nil {
toSerialize["description"] = o.Description
}
@@ -290,23 +323,25 @@ func (o SecurityMonitoringSuppressionUpdateAttributes) MarshalJSON() ([]byte, er
// UnmarshalJSON deserializes the given payload.
func (o *SecurityMonitoringSuppressionUpdateAttributes) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- Description *string `json:"description,omitempty"`
- Enabled *bool `json:"enabled,omitempty"`
- ExpirationDate datadog.NullableInt64 `json:"expiration_date,omitempty"`
- Name *string `json:"name,omitempty"`
- RuleQuery *string `json:"rule_query,omitempty"`
- SuppressionQuery *string `json:"suppression_query,omitempty"`
- Version *int32 `json:"version,omitempty"`
+ DataExclusionQuery *string `json:"data_exclusion_query,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
+ ExpirationDate datadog.NullableInt64 `json:"expiration_date,omitempty"`
+ Name *string `json:"name,omitempty"`
+ RuleQuery *string `json:"rule_query,omitempty"`
+ SuppressionQuery *string `json:"suppression_query,omitempty"`
+ Version *int32 `json:"version,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"description", "enabled", "expiration_date", "name", "rule_query", "suppression_query", "version"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"data_exclusion_query", "description", "enabled", "expiration_date", "name", "rule_query", "suppression_query", "version"})
} else {
return err
}
+ o.DataExclusionQuery = all.DataExclusionQuery
o.Description = all.Description
o.Enabled = all.Enabled
o.ExpirationDate = all.ExpirationDate
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_service_definition_v2_dot2.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_service_definition_v2_dot2.go
index 789239988d..868386592d 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_service_definition_v2_dot2.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_service_definition_v2_dot2.go
@@ -14,6 +14,8 @@ import (
type ServiceDefinitionV2Dot2 struct {
// Identifier for a group of related services serving a product feature, which the service is a part of.
Application *string `json:"application,omitempty"`
+ // A set of CI fingerprints.
+ CiPipelineFingerprints []string `json:"ci-pipeline-fingerprints,omitempty"`
// A list of contacts related to the services.
Contacts []ServiceDefinitionV2Dot2Contact `json:"contacts,omitempty"`
// Unique identifier of the service. Must be unique across all services and is used to match with a service in Datadog.
@@ -94,6 +96,34 @@ func (o *ServiceDefinitionV2Dot2) SetApplication(v string) {
o.Application = &v
}
+// GetCiPipelineFingerprints returns the CiPipelineFingerprints field value if set, zero value otherwise.
+func (o *ServiceDefinitionV2Dot2) GetCiPipelineFingerprints() []string {
+ if o == nil || o.CiPipelineFingerprints == nil {
+ var ret []string
+ return ret
+ }
+ return o.CiPipelineFingerprints
+}
+
+// GetCiPipelineFingerprintsOk returns a tuple with the CiPipelineFingerprints field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *ServiceDefinitionV2Dot2) GetCiPipelineFingerprintsOk() (*[]string, bool) {
+ if o == nil || o.CiPipelineFingerprints == nil {
+ return nil, false
+ }
+ return &o.CiPipelineFingerprints, true
+}
+
+// HasCiPipelineFingerprints returns a boolean if a field has been set.
+func (o *ServiceDefinitionV2Dot2) HasCiPipelineFingerprints() bool {
+ return o != nil && o.CiPipelineFingerprints != nil
+}
+
+// SetCiPipelineFingerprints gets a reference to the given []string and assigns it to the CiPipelineFingerprints field.
+func (o *ServiceDefinitionV2Dot2) SetCiPipelineFingerprints(v []string) {
+ o.CiPipelineFingerprints = v
+}
+
// GetContacts returns the Contacts field value if set, zero value otherwise.
func (o *ServiceDefinitionV2Dot2) GetContacts() []ServiceDefinitionV2Dot2Contact {
if o == nil || o.Contacts == nil {
@@ -457,6 +487,9 @@ func (o ServiceDefinitionV2Dot2) MarshalJSON() ([]byte, error) {
if o.Application != nil {
toSerialize["application"] = o.Application
}
+ if o.CiPipelineFingerprints != nil {
+ toSerialize["ci-pipeline-fingerprints"] = o.CiPipelineFingerprints
+ }
if o.Contacts != nil {
toSerialize["contacts"] = o.Contacts
}
@@ -502,20 +535,21 @@ func (o ServiceDefinitionV2Dot2) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the given payload.
func (o *ServiceDefinitionV2Dot2) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- Application *string `json:"application,omitempty"`
- Contacts []ServiceDefinitionV2Dot2Contact `json:"contacts,omitempty"`
- DdService *string `json:"dd-service"`
- Description *string `json:"description,omitempty"`
- Extensions map[string]interface{} `json:"extensions,omitempty"`
- Integrations *ServiceDefinitionV2Dot2Integrations `json:"integrations,omitempty"`
- Languages []string `json:"languages,omitempty"`
- Lifecycle *string `json:"lifecycle,omitempty"`
- Links []ServiceDefinitionV2Dot2Link `json:"links,omitempty"`
- SchemaVersion *ServiceDefinitionV2Dot2Version `json:"schema-version"`
- Tags []string `json:"tags,omitempty"`
- Team *string `json:"team,omitempty"`
- Tier *string `json:"tier,omitempty"`
- Type *ServiceDefinitionV2Dot2Type `json:"type,omitempty"`
+ Application *string `json:"application,omitempty"`
+ CiPipelineFingerprints []string `json:"ci-pipeline-fingerprints,omitempty"`
+ Contacts []ServiceDefinitionV2Dot2Contact `json:"contacts,omitempty"`
+ DdService *string `json:"dd-service"`
+ Description *string `json:"description,omitempty"`
+ Extensions map[string]interface{} `json:"extensions,omitempty"`
+ Integrations *ServiceDefinitionV2Dot2Integrations `json:"integrations,omitempty"`
+ Languages []string `json:"languages,omitempty"`
+ Lifecycle *string `json:"lifecycle,omitempty"`
+ Links []ServiceDefinitionV2Dot2Link `json:"links,omitempty"`
+ SchemaVersion *ServiceDefinitionV2Dot2Version `json:"schema-version"`
+ Tags []string `json:"tags,omitempty"`
+ Team *string `json:"team,omitempty"`
+ Tier *string `json:"tier,omitempty"`
+ Type *ServiceDefinitionV2Dot2Type `json:"type,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
@@ -528,13 +562,14 @@ func (o *ServiceDefinitionV2Dot2) UnmarshalJSON(bytes []byte) (err error) {
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"application", "contacts", "dd-service", "description", "extensions", "integrations", "languages", "lifecycle", "links", "schema-version", "tags", "team", "tier", "type"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"application", "ci-pipeline-fingerprints", "contacts", "dd-service", "description", "extensions", "integrations", "languages", "lifecycle", "links", "schema-version", "tags", "team", "tier", "type"})
} else {
return err
}
hasInvalidField := false
o.Application = all.Application
+ o.CiPipelineFingerprints = all.CiPipelineFingerprints
o.Contacts = all.Contacts
o.DdService = *all.DdService
o.Description = all.Description
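A short sketch of the new ci-pipeline-fingerprints field on the v2.2 service definition; the fingerprint values are placeholders:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	var def datadogV2.ServiceDefinitionV2Dot2
	// Placeholder fingerprints for illustration only.
	def.SetCiPipelineFingerprints([]string{"j88xdEy0", "eZ7LMljCk8vo"})
	if fps, ok := def.GetCiPipelineFingerprintsOk(); ok {
		fmt.Println("ci pipeline fingerprints:", *fps)
	}
}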
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_create_request.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_create_request.go
new file mode 100644
index 0000000000..e4d2033e3a
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_create_request.go
@@ -0,0 +1,110 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SloReportCreateRequest The SLO report request body.
+type SloReportCreateRequest struct {
+ // The data portion of the SLO report request.
+ Data SloReportCreateRequestData `json:"data"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSloReportCreateRequest instantiates a new SloReportCreateRequest object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSloReportCreateRequest(data SloReportCreateRequestData) *SloReportCreateRequest {
+ this := SloReportCreateRequest{}
+ this.Data = data
+ return &this
+}
+
+// NewSloReportCreateRequestWithDefaults instantiates a new SloReportCreateRequest object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSloReportCreateRequestWithDefaults() *SloReportCreateRequest {
+ this := SloReportCreateRequest{}
+ return &this
+}
+
+// GetData returns the Data field value.
+func (o *SloReportCreateRequest) GetData() SloReportCreateRequestData {
+ if o == nil {
+ var ret SloReportCreateRequestData
+ return ret
+ }
+ return o.Data
+}
+
+// GetDataOk returns a tuple with the Data field value
+// and a boolean to check if the value has been set.
+func (o *SloReportCreateRequest) GetDataOk() (*SloReportCreateRequestData, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Data, true
+}
+
+// SetData sets field value.
+func (o *SloReportCreateRequest) SetData(v SloReportCreateRequestData) {
+ o.Data = v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SloReportCreateRequest) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ toSerialize["data"] = o.Data
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SloReportCreateRequest) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Data *SloReportCreateRequestData `json:"data"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ if all.Data == nil {
+ return fmt.Errorf("required field data missing")
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"data"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Data.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Data = *all.Data
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_create_request_attributes.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_create_request_attributes.go
new file mode 100644
index 0000000000..091e6cf3d1
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_create_request_attributes.go
@@ -0,0 +1,245 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SloReportCreateRequestAttributes The attributes portion of the SLO report request.
+type SloReportCreateRequestAttributes struct {
+ // The `from` timestamp for the report in epoch seconds.
+ FromTs int64 `json:"from_ts"`
+ // The frequency at which report data is to be generated.
+ Interval *SLOReportInterval `json:"interval,omitempty"`
+ // The query string used to filter SLO results. Some examples of queries include `service:` and `slo-name`.
+ Query string `json:"query"`
+ // The timezone used to determine the start and end of each interval. For example, weekly intervals start at 12am on Sunday in the specified timezone.
+ Timezone *string `json:"timezone,omitempty"`
+ // The `to` timestamp for the report in epoch seconds.
+ ToTs int64 `json:"to_ts"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSloReportCreateRequestAttributes instantiates a new SloReportCreateRequestAttributes object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSloReportCreateRequestAttributes(fromTs int64, query string, toTs int64) *SloReportCreateRequestAttributes {
+ this := SloReportCreateRequestAttributes{}
+ this.FromTs = fromTs
+ this.Query = query
+ this.ToTs = toTs
+ return &this
+}
+
+// NewSloReportCreateRequestAttributesWithDefaults instantiates a new SloReportCreateRequestAttributes object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSloReportCreateRequestAttributesWithDefaults() *SloReportCreateRequestAttributes {
+ this := SloReportCreateRequestAttributes{}
+ return &this
+}
+
+// GetFromTs returns the FromTs field value.
+func (o *SloReportCreateRequestAttributes) GetFromTs() int64 {
+ if o == nil {
+ var ret int64
+ return ret
+ }
+ return o.FromTs
+}
+
+// GetFromTsOk returns a tuple with the FromTs field value
+// and a boolean to check if the value has been set.
+func (o *SloReportCreateRequestAttributes) GetFromTsOk() (*int64, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.FromTs, true
+}
+
+// SetFromTs sets field value.
+func (o *SloReportCreateRequestAttributes) SetFromTs(v int64) {
+ o.FromTs = v
+}
+
+// GetInterval returns the Interval field value if set, zero value otherwise.
+func (o *SloReportCreateRequestAttributes) GetInterval() SLOReportInterval {
+ if o == nil || o.Interval == nil {
+ var ret SLOReportInterval
+ return ret
+ }
+ return *o.Interval
+}
+
+// GetIntervalOk returns a tuple with the Interval field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SloReportCreateRequestAttributes) GetIntervalOk() (*SLOReportInterval, bool) {
+ if o == nil || o.Interval == nil {
+ return nil, false
+ }
+ return o.Interval, true
+}
+
+// HasInterval returns a boolean if a field has been set.
+func (o *SloReportCreateRequestAttributes) HasInterval() bool {
+ return o != nil && o.Interval != nil
+}
+
+// SetInterval gets a reference to the given SLOReportInterval and assigns it to the Interval field.
+func (o *SloReportCreateRequestAttributes) SetInterval(v SLOReportInterval) {
+ o.Interval = &v
+}
+
+// GetQuery returns the Query field value.
+func (o *SloReportCreateRequestAttributes) GetQuery() string {
+ if o == nil {
+ var ret string
+ return ret
+ }
+ return o.Query
+}
+
+// GetQueryOk returns a tuple with the Query field value
+// and a boolean to check if the value has been set.
+func (o *SloReportCreateRequestAttributes) GetQueryOk() (*string, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Query, true
+}
+
+// SetQuery sets field value.
+func (o *SloReportCreateRequestAttributes) SetQuery(v string) {
+ o.Query = v
+}
+
+// GetTimezone returns the Timezone field value if set, zero value otherwise.
+func (o *SloReportCreateRequestAttributes) GetTimezone() string {
+ if o == nil || o.Timezone == nil {
+ var ret string
+ return ret
+ }
+ return *o.Timezone
+}
+
+// GetTimezoneOk returns a tuple with the Timezone field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SloReportCreateRequestAttributes) GetTimezoneOk() (*string, bool) {
+ if o == nil || o.Timezone == nil {
+ return nil, false
+ }
+ return o.Timezone, true
+}
+
+// HasTimezone returns a boolean if a field has been set.
+func (o *SloReportCreateRequestAttributes) HasTimezone() bool {
+ return o != nil && o.Timezone != nil
+}
+
+// SetTimezone gets a reference to the given string and assigns it to the Timezone field.
+func (o *SloReportCreateRequestAttributes) SetTimezone(v string) {
+ o.Timezone = &v
+}
+
+// GetToTs returns the ToTs field value.
+func (o *SloReportCreateRequestAttributes) GetToTs() int64 {
+ if o == nil {
+ var ret int64
+ return ret
+ }
+ return o.ToTs
+}
+
+// GetToTsOk returns a tuple with the ToTs field value
+// and a boolean to check if the value has been set.
+func (o *SloReportCreateRequestAttributes) GetToTsOk() (*int64, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.ToTs, true
+}
+
+// SetToTs sets field value.
+func (o *SloReportCreateRequestAttributes) SetToTs(v int64) {
+ o.ToTs = v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SloReportCreateRequestAttributes) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ toSerialize["from_ts"] = o.FromTs
+ if o.Interval != nil {
+ toSerialize["interval"] = o.Interval
+ }
+ toSerialize["query"] = o.Query
+ if o.Timezone != nil {
+ toSerialize["timezone"] = o.Timezone
+ }
+ toSerialize["to_ts"] = o.ToTs
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SloReportCreateRequestAttributes) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ FromTs *int64 `json:"from_ts"`
+ Interval *SLOReportInterval `json:"interval,omitempty"`
+ Query *string `json:"query"`
+ Timezone *string `json:"timezone,omitempty"`
+ ToTs *int64 `json:"to_ts"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ if all.FromTs == nil {
+ return fmt.Errorf("required field from_ts missing")
+ }
+ if all.Query == nil {
+ return fmt.Errorf("required field query missing")
+ }
+ if all.ToTs == nil {
+ return fmt.Errorf("required field to_ts missing")
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"from_ts", "interval", "query", "timezone", "to_ts"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ o.FromTs = *all.FromTs
+ if all.Interval != nil && !all.Interval.IsValid() {
+ hasInvalidField = true
+ } else {
+ o.Interval = all.Interval
+ }
+ o.Query = *all.Query
+ o.Timezone = all.Timezone
+ o.ToTs = *all.ToTs
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_create_request_data.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_create_request_data.go
new file mode 100644
index 0000000000..a7065a5688
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_create_request_data.go
@@ -0,0 +1,110 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SloReportCreateRequestData The data portion of the SLO report request.
+type SloReportCreateRequestData struct {
+ // The attributes portion of the SLO report request.
+ Attributes SloReportCreateRequestAttributes `json:"attributes"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSloReportCreateRequestData instantiates a new SloReportCreateRequestData object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSloReportCreateRequestData(attributes SloReportCreateRequestAttributes) *SloReportCreateRequestData {
+ this := SloReportCreateRequestData{}
+ this.Attributes = attributes
+ return &this
+}
+
+// NewSloReportCreateRequestDataWithDefaults instantiates a new SloReportCreateRequestData object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSloReportCreateRequestDataWithDefaults() *SloReportCreateRequestData {
+ this := SloReportCreateRequestData{}
+ return &this
+}
+
+// GetAttributes returns the Attributes field value.
+func (o *SloReportCreateRequestData) GetAttributes() SloReportCreateRequestAttributes {
+ if o == nil {
+ var ret SloReportCreateRequestAttributes
+ return ret
+ }
+ return o.Attributes
+}
+
+// GetAttributesOk returns a tuple with the Attributes field value
+// and a boolean to check if the value has been set.
+func (o *SloReportCreateRequestData) GetAttributesOk() (*SloReportCreateRequestAttributes, bool) {
+ if o == nil {
+ return nil, false
+ }
+ return &o.Attributes, true
+}
+
+// SetAttributes sets field value.
+func (o *SloReportCreateRequestData) SetAttributes(v SloReportCreateRequestAttributes) {
+ o.Attributes = v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SloReportCreateRequestData) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ toSerialize["attributes"] = o.Attributes
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SloReportCreateRequestData) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Attributes *SloReportCreateRequestAttributes `json:"attributes"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ if all.Attributes == nil {
+ return fmt.Errorf("required field attributes missing")
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"attributes"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Attributes.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Attributes = *all.Attributes
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_interval.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_interval.go
new file mode 100644
index 0000000000..4a16379124
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_interval.go
@@ -0,0 +1,66 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SLOReportInterval The frequency at which report data is to be generated.
+type SLOReportInterval string
+
+// List of SLOReportInterval.
+const (
+ SLOREPORTINTERVAL_WEEKLY SLOReportInterval = "weekly"
+ SLOREPORTINTERVAL_MONTHLY SLOReportInterval = "monthly"
+)
+
+var allowedSLOReportIntervalEnumValues = []SLOReportInterval{
+ SLOREPORTINTERVAL_WEEKLY,
+ SLOREPORTINTERVAL_MONTHLY,
+}
+
+// GetAllowedValues returns the list of possible values.
+func (v *SLOReportInterval) GetAllowedValues() []SLOReportInterval {
+ return allowedSLOReportIntervalEnumValues
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (v *SLOReportInterval) UnmarshalJSON(src []byte) error {
+ var value string
+ err := datadog.Unmarshal(src, &value)
+ if err != nil {
+ return err
+ }
+ *v = SLOReportInterval(value)
+ return nil
+}
+
+// NewSLOReportIntervalFromValue returns a pointer to a valid SLOReportInterval
+// for the value passed as argument, or an error if the value passed is not allowed by the enum.
+func NewSLOReportIntervalFromValue(v string) (*SLOReportInterval, error) {
+ ev := SLOReportInterval(v)
+ if ev.IsValid() {
+ return &ev, nil
+ }
+ return nil, fmt.Errorf("invalid value '%v' for SLOReportInterval: valid values are %v", v, allowedSLOReportIntervalEnumValues)
+}
+
+// IsValid returns true if the value is valid for the enum, false otherwise.
+func (v SLOReportInterval) IsValid() bool {
+ for _, existing := range allowedSLOReportIntervalEnumValues {
+ if existing == v {
+ return true
+ }
+ }
+ return false
+}
+
+// Ptr returns reference to SLOReportInterval value.
+func (v SLOReportInterval) Ptr() *SLOReportInterval {
+ return &v
+}
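With the request, data, attributes, and interval models above, an SLO report request body can be assembled as in the sketch below; the query, time range, and timezone are hypothetical:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	now := time.Now().Unix()
	// from_ts and to_ts are epoch seconds; this covers the last 30 days.
	attrs := datadogV2.NewSloReportCreateRequestAttributes(now-30*24*3600, "slo-name:*checkout*", now)
	attrs.SetInterval(datadogV2.SLOREPORTINTERVAL_WEEKLY)
	attrs.SetTimezone("America/New_York")

	req := datadogV2.NewSloReportCreateRequest(*datadogV2.NewSloReportCreateRequestData(*attrs))

	body, err := req.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}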
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_post_response.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_post_response.go
new file mode 100644
index 0000000000..030c0cb428
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_post_response.go
@@ -0,0 +1,111 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SLOReportPostResponse The SLO report response.
+type SLOReportPostResponse struct {
+ // The data portion of the SLO report response.
+ Data *SLOReportPostResponseData `json:"data,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSLOReportPostResponse instantiates a new SLOReportPostResponse object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSLOReportPostResponse() *SLOReportPostResponse {
+ this := SLOReportPostResponse{}
+ return &this
+}
+
+// NewSLOReportPostResponseWithDefaults instantiates a new SLOReportPostResponse object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSLOReportPostResponseWithDefaults() *SLOReportPostResponse {
+ this := SLOReportPostResponse{}
+ return &this
+}
+
+// GetData returns the Data field value if set, zero value otherwise.
+func (o *SLOReportPostResponse) GetData() SLOReportPostResponseData {
+ if o == nil || o.Data == nil {
+ var ret SLOReportPostResponseData
+ return ret
+ }
+ return *o.Data
+}
+
+// GetDataOk returns a tuple with the Data field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SLOReportPostResponse) GetDataOk() (*SLOReportPostResponseData, bool) {
+ if o == nil || o.Data == nil {
+ return nil, false
+ }
+ return o.Data, true
+}
+
+// HasData returns a boolean if a field has been set.
+func (o *SLOReportPostResponse) HasData() bool {
+ return o != nil && o.Data != nil
+}
+
+// SetData gets a reference to the given SLOReportPostResponseData and assigns it to the Data field.
+func (o *SLOReportPostResponse) SetData(v SLOReportPostResponseData) {
+ o.Data = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SLOReportPostResponse) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Data != nil {
+ toSerialize["data"] = o.Data
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SLOReportPostResponse) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Data *SLOReportPostResponseData `json:"data,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"data"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Data != nil && all.Data.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Data = all.Data
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_post_response_data.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_post_response_data.go
new file mode 100644
index 0000000000..2fe894733c
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_post_response_data.go
@@ -0,0 +1,137 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SLOReportPostResponseData The data portion of the SLO report response.
+type SLOReportPostResponseData struct {
+ // The ID of the report job.
+ Id *string `json:"id,omitempty"`
+ // The type of ID.
+ Type *string `json:"type,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSLOReportPostResponseData instantiates a new SLOReportPostResponseData object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSLOReportPostResponseData() *SLOReportPostResponseData {
+ this := SLOReportPostResponseData{}
+ return &this
+}
+
+// NewSLOReportPostResponseDataWithDefaults instantiates a new SLOReportPostResponseData object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSLOReportPostResponseDataWithDefaults() *SLOReportPostResponseData {
+ this := SLOReportPostResponseData{}
+ return &this
+}
+
+// GetId returns the Id field value if set, zero value otherwise.
+func (o *SLOReportPostResponseData) GetId() string {
+ if o == nil || o.Id == nil {
+ var ret string
+ return ret
+ }
+ return *o.Id
+}
+
+// GetIdOk returns a tuple with the Id field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SLOReportPostResponseData) GetIdOk() (*string, bool) {
+ if o == nil || o.Id == nil {
+ return nil, false
+ }
+ return o.Id, true
+}
+
+// HasId returns a boolean if a field has been set.
+func (o *SLOReportPostResponseData) HasId() bool {
+ return o != nil && o.Id != nil
+}
+
+// SetId gets a reference to the given string and assigns it to the Id field.
+func (o *SLOReportPostResponseData) SetId(v string) {
+ o.Id = &v
+}
+
+// GetType returns the Type field value if set, zero value otherwise.
+func (o *SLOReportPostResponseData) GetType() string {
+ if o == nil || o.Type == nil {
+ var ret string
+ return ret
+ }
+ return *o.Type
+}
+
+// GetTypeOk returns a tuple with the Type field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SLOReportPostResponseData) GetTypeOk() (*string, bool) {
+ if o == nil || o.Type == nil {
+ return nil, false
+ }
+ return o.Type, true
+}
+
+// HasType returns a boolean if a field has been set.
+func (o *SLOReportPostResponseData) HasType() bool {
+ return o != nil && o.Type != nil
+}
+
+// SetType gets a reference to the given string and assigns it to the Type field.
+func (o *SLOReportPostResponseData) SetType(v string) {
+ o.Type = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SLOReportPostResponseData) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Id != nil {
+ toSerialize["id"] = o.Id
+ }
+ if o.Type != nil {
+ toSerialize["type"] = o.Type
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SLOReportPostResponseData) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Id *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"id", "type"})
+ } else {
+ return err
+ }
+ o.Id = all.Id
+ o.Type = all.Type
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ return nil
+}
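A minimal sketch of pulling the report job ID out of the creation response; the payload shown is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
)

func main() {
	// Hypothetical response body from the SLO report creation endpoint.
	payload := []byte(`{"data":{"id":"k3q5w8x1","type":"report_id"}}`)

	var resp datadogV2.SLOReportPostResponse
	if err := resp.UnmarshalJSON(payload); err != nil {
		log.Fatal(err)
	}
	if data, ok := resp.GetDataOk(); ok {
		fmt.Println("report job id:", data.GetId())
	}
}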
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status.go
new file mode 100644
index 0000000000..3788ff403f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status.go
@@ -0,0 +1,70 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "fmt"
+
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SLOReportStatus The status of the SLO report job.
+type SLOReportStatus string
+
+// List of SLOReportStatus.
+const (
+ SLOREPORTSTATUS_IN_PROGRESS SLOReportStatus = "in_progress"
+ SLOREPORTSTATUS_COMPLETED SLOReportStatus = "completed"
+ SLOREPORTSTATUS_COMPLETED_WITH_ERRORS SLOReportStatus = "completed_with_errors"
+ SLOREPORTSTATUS_FAILED SLOReportStatus = "failed"
+)
+
+var allowedSLOReportStatusEnumValues = []SLOReportStatus{
+ SLOREPORTSTATUS_IN_PROGRESS,
+ SLOREPORTSTATUS_COMPLETED,
+ SLOREPORTSTATUS_COMPLETED_WITH_ERRORS,
+ SLOREPORTSTATUS_FAILED,
+}
+
+// GetAllowedValues returns the list of possible values.
+func (v *SLOReportStatus) GetAllowedValues() []SLOReportStatus {
+ return allowedSLOReportStatusEnumValues
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (v *SLOReportStatus) UnmarshalJSON(src []byte) error {
+ var value string
+ err := datadog.Unmarshal(src, &value)
+ if err != nil {
+ return err
+ }
+ *v = SLOReportStatus(value)
+ return nil
+}
+
+// NewSLOReportStatusFromValue returns a pointer to a valid SLOReportStatus
+// for the value passed as argument, or an error if the value passed is not allowed by the enum.
+func NewSLOReportStatusFromValue(v string) (*SLOReportStatus, error) {
+ ev := SLOReportStatus(v)
+ if ev.IsValid() {
+ return &ev, nil
+ }
+ return nil, fmt.Errorf("invalid value '%v' for SLOReportStatus: valid values are %v", v, allowedSLOReportStatusEnumValues)
+}
+
+// IsValid returns true if the value is valid for the enum, false otherwise.
+func (v SLOReportStatus) IsValid() bool {
+ for _, existing := range allowedSLOReportStatusEnumValues {
+ if existing == v {
+ return true
+ }
+ }
+ return false
+}
+
+// Ptr returns a reference to the SLOReportStatus value.
+func (v SLOReportStatus) Ptr() *SLOReportStatus {
+ return &v
+}
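As a quick illustration (not part of the vendored diff), a minimal sketch of how the enum above is meant to be consumed: NewSLOReportStatusFromValue only accepts the four listed values and rejects anything else. The import path follows the vendored module path used throughout this change.

    package main

    import (
        "fmt"

        "github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
    )

    func main() {
        // A value from the allowed list yields a typed pointer.
        status, err := datadogV2.NewSLOReportStatusFromValue("completed")
        if err == nil {
            fmt.Println(*status, status.IsValid()) // completed true
        }

        // Anything else is rejected with an error listing the allowed values.
        if _, err := datadogV2.NewSLOReportStatusFromValue("queued"); err != nil {
            fmt.Println(err)
        }
    }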
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status_get_response.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status_get_response.go
new file mode 100644
index 0000000000..72041cb56a
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status_get_response.go
@@ -0,0 +1,111 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SLOReportStatusGetResponse The SLO report status response.
+type SLOReportStatusGetResponse struct {
+ // The data portion of the SLO report status response.
+ Data *SLOReportStatusGetResponseData `json:"data,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSLOReportStatusGetResponse instantiates a new SLOReportStatusGetResponse object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSLOReportStatusGetResponse() *SLOReportStatusGetResponse {
+ this := SLOReportStatusGetResponse{}
+ return &this
+}
+
+// NewSLOReportStatusGetResponseWithDefaults instantiates a new SLOReportStatusGetResponse object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSLOReportStatusGetResponseWithDefaults() *SLOReportStatusGetResponse {
+ this := SLOReportStatusGetResponse{}
+ return &this
+}
+
+// GetData returns the Data field value if set, zero value otherwise.
+func (o *SLOReportStatusGetResponse) GetData() SLOReportStatusGetResponseData {
+ if o == nil || o.Data == nil {
+ var ret SLOReportStatusGetResponseData
+ return ret
+ }
+ return *o.Data
+}
+
+// GetDataOk returns a tuple with the Data field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SLOReportStatusGetResponse) GetDataOk() (*SLOReportStatusGetResponseData, bool) {
+ if o == nil || o.Data == nil {
+ return nil, false
+ }
+ return o.Data, true
+}
+
+// HasData returns a boolean if a field has been set.
+func (o *SLOReportStatusGetResponse) HasData() bool {
+ return o != nil && o.Data != nil
+}
+
+// SetData gets a reference to the given SLOReportStatusGetResponseData and assigns it to the Data field.
+func (o *SLOReportStatusGetResponse) SetData(v SLOReportStatusGetResponseData) {
+ o.Data = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SLOReportStatusGetResponse) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Data != nil {
+ toSerialize["data"] = o.Data
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SLOReportStatusGetResponse) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Data *SLOReportStatusGetResponseData `json:"data,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"data"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Data != nil && all.Data.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Data = all.Data
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status_get_response_attributes.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status_get_response_attributes.go
new file mode 100644
index 0000000000..a458a12500
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status_get_response_attributes.go
@@ -0,0 +1,112 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SLOReportStatusGetResponseAttributes The attributes portion of the SLO report status response.
+type SLOReportStatusGetResponseAttributes struct {
+ // The status of the SLO report job.
+ Status *SLOReportStatus `json:"status,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSLOReportStatusGetResponseAttributes instantiates a new SLOReportStatusGetResponseAttributes object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSLOReportStatusGetResponseAttributes() *SLOReportStatusGetResponseAttributes {
+ this := SLOReportStatusGetResponseAttributes{}
+ return &this
+}
+
+// NewSLOReportStatusGetResponseAttributesWithDefaults instantiates a new SLOReportStatusGetResponseAttributes object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSLOReportStatusGetResponseAttributesWithDefaults() *SLOReportStatusGetResponseAttributes {
+ this := SLOReportStatusGetResponseAttributes{}
+ return &this
+}
+
+// GetStatus returns the Status field value if set, zero value otherwise.
+func (o *SLOReportStatusGetResponseAttributes) GetStatus() SLOReportStatus {
+ if o == nil || o.Status == nil {
+ var ret SLOReportStatus
+ return ret
+ }
+ return *o.Status
+}
+
+// GetStatusOk returns a tuple with the Status field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SLOReportStatusGetResponseAttributes) GetStatusOk() (*SLOReportStatus, bool) {
+ if o == nil || o.Status == nil {
+ return nil, false
+ }
+ return o.Status, true
+}
+
+// HasStatus returns a boolean if a field has been set.
+func (o *SLOReportStatusGetResponseAttributes) HasStatus() bool {
+ return o != nil && o.Status != nil
+}
+
+// SetStatus gets a reference to the given SLOReportStatus and assigns it to the Status field.
+func (o *SLOReportStatusGetResponseAttributes) SetStatus(v SLOReportStatus) {
+ o.Status = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SLOReportStatusGetResponseAttributes) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Status != nil {
+ toSerialize["status"] = o.Status
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SLOReportStatusGetResponseAttributes) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Status *SLOReportStatus `json:"status,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"status"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Status != nil && !all.Status.IsValid() {
+ hasInvalidField = true
+ } else {
+ o.Status = all.Status
+ }
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status_get_response_data.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status_get_response_data.go
new file mode 100644
index 0000000000..46a498beec
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_slo_report_status_get_response_data.go
@@ -0,0 +1,181 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2019-Present Datadog, Inc.
+
+package datadogV2
+
+import (
+ "github.com/DataDog/datadog-api-client-go/v2/api/datadog"
+)
+
+// SLOReportStatusGetResponseData The data portion of the SLO report status response.
+type SLOReportStatusGetResponseData struct {
+ // The attributes portion of the SLO report status response.
+ Attributes *SLOReportStatusGetResponseAttributes `json:"attributes,omitempty"`
+ // The ID of the report job.
+ Id *string `json:"id,omitempty"`
+ // The type of ID.
+ Type *string `json:"type,omitempty"`
+ // UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
+ UnparsedObject map[string]interface{} `json:"-"`
+ AdditionalProperties map[string]interface{}
+}
+
+// NewSLOReportStatusGetResponseData instantiates a new SLOReportStatusGetResponseData object.
+// This constructor will assign default values to properties that have it defined,
+// and makes sure properties required by API are set, but the set of arguments
+// will change when the set of required properties is changed.
+func NewSLOReportStatusGetResponseData() *SLOReportStatusGetResponseData {
+ this := SLOReportStatusGetResponseData{}
+ return &this
+}
+
+// NewSLOReportStatusGetResponseDataWithDefaults instantiates a new SLOReportStatusGetResponseData object.
+// This constructor will only assign default values to properties that have it defined,
+// but it doesn't guarantee that properties required by API are set.
+func NewSLOReportStatusGetResponseDataWithDefaults() *SLOReportStatusGetResponseData {
+ this := SLOReportStatusGetResponseData{}
+ return &this
+}
+
+// GetAttributes returns the Attributes field value if set, zero value otherwise.
+func (o *SLOReportStatusGetResponseData) GetAttributes() SLOReportStatusGetResponseAttributes {
+ if o == nil || o.Attributes == nil {
+ var ret SLOReportStatusGetResponseAttributes
+ return ret
+ }
+ return *o.Attributes
+}
+
+// GetAttributesOk returns a tuple with the Attributes field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SLOReportStatusGetResponseData) GetAttributesOk() (*SLOReportStatusGetResponseAttributes, bool) {
+ if o == nil || o.Attributes == nil {
+ return nil, false
+ }
+ return o.Attributes, true
+}
+
+// HasAttributes returns a boolean if a field has been set.
+func (o *SLOReportStatusGetResponseData) HasAttributes() bool {
+ return o != nil && o.Attributes != nil
+}
+
+// SetAttributes gets a reference to the given SLOReportStatusGetResponseAttributes and assigns it to the Attributes field.
+func (o *SLOReportStatusGetResponseData) SetAttributes(v SLOReportStatusGetResponseAttributes) {
+ o.Attributes = &v
+}
+
+// GetId returns the Id field value if set, zero value otherwise.
+func (o *SLOReportStatusGetResponseData) GetId() string {
+ if o == nil || o.Id == nil {
+ var ret string
+ return ret
+ }
+ return *o.Id
+}
+
+// GetIdOk returns a tuple with the Id field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SLOReportStatusGetResponseData) GetIdOk() (*string, bool) {
+ if o == nil || o.Id == nil {
+ return nil, false
+ }
+ return o.Id, true
+}
+
+// HasId returns a boolean if a field has been set.
+func (o *SLOReportStatusGetResponseData) HasId() bool {
+ return o != nil && o.Id != nil
+}
+
+// SetId gets a reference to the given string and assigns it to the Id field.
+func (o *SLOReportStatusGetResponseData) SetId(v string) {
+ o.Id = &v
+}
+
+// GetType returns the Type field value if set, zero value otherwise.
+func (o *SLOReportStatusGetResponseData) GetType() string {
+ if o == nil || o.Type == nil {
+ var ret string
+ return ret
+ }
+ return *o.Type
+}
+
+// GetTypeOk returns a tuple with the Type field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *SLOReportStatusGetResponseData) GetTypeOk() (*string, bool) {
+ if o == nil || o.Type == nil {
+ return nil, false
+ }
+ return o.Type, true
+}
+
+// HasType returns a boolean if a field has been set.
+func (o *SLOReportStatusGetResponseData) HasType() bool {
+ return o != nil && o.Type != nil
+}
+
+// SetType gets a reference to the given string and assigns it to the Type field.
+func (o *SLOReportStatusGetResponseData) SetType(v string) {
+ o.Type = &v
+}
+
+// MarshalJSON serializes the struct using spec logic.
+func (o SLOReportStatusGetResponseData) MarshalJSON() ([]byte, error) {
+ toSerialize := map[string]interface{}{}
+ if o.UnparsedObject != nil {
+ return datadog.Marshal(o.UnparsedObject)
+ }
+ if o.Attributes != nil {
+ toSerialize["attributes"] = o.Attributes
+ }
+ if o.Id != nil {
+ toSerialize["id"] = o.Id
+ }
+ if o.Type != nil {
+ toSerialize["type"] = o.Type
+ }
+
+ for key, value := range o.AdditionalProperties {
+ toSerialize[key] = value
+ }
+ return datadog.Marshal(toSerialize)
+}
+
+// UnmarshalJSON deserializes the given payload.
+func (o *SLOReportStatusGetResponseData) UnmarshalJSON(bytes []byte) (err error) {
+ all := struct {
+ Attributes *SLOReportStatusGetResponseAttributes `json:"attributes,omitempty"`
+ Id *string `json:"id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ }{}
+ if err = datadog.Unmarshal(bytes, &all); err != nil {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+ additionalProperties := make(map[string]interface{})
+ if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
+ datadog.DeleteKeys(additionalProperties, &[]string{"attributes", "id", "type"})
+ } else {
+ return err
+ }
+
+ hasInvalidField := false
+ if all.Attributes != nil && all.Attributes.UnparsedObject != nil && o.UnparsedObject == nil {
+ hasInvalidField = true
+ }
+ o.Attributes = all.Attributes
+ o.Id = all.Id
+ o.Type = all.Type
+
+ if len(additionalProperties) > 0 {
+ o.AdditionalProperties = additionalProperties
+ }
+
+ if hasInvalidField {
+ return datadog.Unmarshal(bytes, &o.UnparsedObject)
+ }
+
+ return nil
+}
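The UnmarshalJSON above follows the same pattern as the other generated models in this change: known keys land in typed fields, leftover keys are kept in AdditionalProperties, and a payload that cannot be decoded at all is preserved verbatim in UnparsedObject. A small sketch with a made-up payload, assuming nothing beyond the accessors defined above:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
    )

    func main() {
        payload := []byte(`{"id":"abc-123","type":"report","extra_key":"kept"}`)

        var data datadogV2.SLOReportStatusGetResponseData
        if err := json.Unmarshal(payload, &data); err != nil {
            panic(err)
        }

        fmt.Println(data.GetId(), data.GetType())           // abc-123 report
        fmt.Println(data.AdditionalProperties["extra_key"]) // kept
    }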
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_timeseries_response_series.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_timeseries_response_series.go
index cb367b3c63..40491463d3 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_timeseries_response_series.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_timeseries_response_series.go
@@ -97,9 +97,9 @@ func (o *TimeseriesResponseSeries) SetQueryIndex(v int32) {
o.QueryIndex = &v
}
-// GetUnit returns the Unit field value if set, zero value otherwise.
+// GetUnit returns the Unit field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *TimeseriesResponseSeries) GetUnit() []Unit {
- if o == nil || o.Unit == nil {
+ if o == nil {
var ret []Unit
return ret
}
@@ -108,6 +108,7 @@ func (o *TimeseriesResponseSeries) GetUnit() []Unit {
// GetUnitOk returns a tuple with the Unit field value if set, nil otherwise
// and a boolean to check if the value has been set.
+// NOTE: If the value is an explicit nil, `nil, true` will be returned.
func (o *TimeseriesResponseSeries) GetUnitOk() (*[]Unit, bool) {
if o == nil || o.Unit == nil {
return nil, false
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_user_team_included.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_user_team_included.go
index 4963747898..f66c29ae3c 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_user_team_included.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_user_team_included.go
@@ -11,6 +11,7 @@ import (
// UserTeamIncluded - Included resources related to the team membership
type UserTeamIncluded struct {
User *User
+ Team *Team
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject interface{}
@@ -21,6 +22,11 @@ func UserAsUserTeamIncluded(v *User) UserTeamIncluded {
return UserTeamIncluded{User: v}
}
+// TeamAsUserTeamIncluded is a convenience function that returns Team wrapped in UserTeamIncluded.
+func TeamAsUserTeamIncluded(v *Team) UserTeamIncluded {
+ return UserTeamIncluded{Team: v}
+}
+
// UnmarshalJSON turns data into one of the pointers in the struct.
func (obj *UserTeamIncluded) UnmarshalJSON(data []byte) error {
var err error
@@ -42,9 +48,27 @@ func (obj *UserTeamIncluded) UnmarshalJSON(data []byte) error {
obj.User = nil
}
+ // try to unmarshal data into Team
+ err = datadog.Unmarshal(data, &obj.Team)
+ if err == nil {
+ if obj.Team != nil && obj.Team.UnparsedObject == nil {
+ jsonTeam, _ := datadog.Marshal(obj.Team)
+ if string(jsonTeam) == "{}" { // empty struct
+ obj.Team = nil
+ } else {
+ match++
+ }
+ } else {
+ obj.Team = nil
+ }
+ } else {
+ obj.Team = nil
+ }
+
if match != 1 { // more than 1 match
// reset to nil
obj.User = nil
+ obj.Team = nil
return datadog.Unmarshal(data, &obj.UnparsedObject)
}
return nil // exactly one match
@@ -56,6 +80,10 @@ func (obj UserTeamIncluded) MarshalJSON() ([]byte, error) {
return datadog.Marshal(&obj.User)
}
+ if obj.Team != nil {
+ return datadog.Marshal(&obj.Team)
+ }
+
if obj.UnparsedObject != nil {
return datadog.Marshal(obj.UnparsedObject)
}
@@ -68,6 +96,10 @@ func (obj *UserTeamIncluded) GetActualInstance() interface{} {
return obj.User
}
+ if obj.Team != nil {
+ return obj.Team
+ }
+
// all schemas are nil
return nil
}
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_user_teams_response.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_user_teams_response.go
index d5a5eb53c9..abed90f75b 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_user_teams_response.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/api/datadogV2/model_user_teams_response.go
@@ -12,6 +12,8 @@ import (
type UserTeamsResponse struct {
// Team memberships response data
Data []UserTeam `json:"data,omitempty"`
+ // Resources related to the team memberships
+ Included []UserTeamIncluded `json:"included,omitempty"`
// Teams response links.
Links *TeamsResponseLinks `json:"links,omitempty"`
// Teams response metadata.
@@ -66,6 +68,34 @@ func (o *UserTeamsResponse) SetData(v []UserTeam) {
o.Data = v
}
+// GetIncluded returns the Included field value if set, zero value otherwise.
+func (o *UserTeamsResponse) GetIncluded() []UserTeamIncluded {
+ if o == nil || o.Included == nil {
+ var ret []UserTeamIncluded
+ return ret
+ }
+ return o.Included
+}
+
+// GetIncludedOk returns a tuple with the Included field value if set, nil otherwise
+// and a boolean to check if the value has been set.
+func (o *UserTeamsResponse) GetIncludedOk() (*[]UserTeamIncluded, bool) {
+ if o == nil || o.Included == nil {
+ return nil, false
+ }
+ return &o.Included, true
+}
+
+// HasIncluded returns a boolean if a field has been set.
+func (o *UserTeamsResponse) HasIncluded() bool {
+ return o != nil && o.Included != nil
+}
+
+// SetIncluded gets a reference to the given []UserTeamIncluded and assigns it to the Included field.
+func (o *UserTeamsResponse) SetIncluded(v []UserTeamIncluded) {
+ o.Included = v
+}
+
// GetLinks returns the Links field value if set, zero value otherwise.
func (o *UserTeamsResponse) GetLinks() TeamsResponseLinks {
if o == nil || o.Links == nil {
@@ -131,6 +161,9 @@ func (o UserTeamsResponse) MarshalJSON() ([]byte, error) {
if o.Data != nil {
toSerialize["data"] = o.Data
}
+ if o.Included != nil {
+ toSerialize["included"] = o.Included
+ }
if o.Links != nil {
toSerialize["links"] = o.Links
}
@@ -147,22 +180,24 @@ func (o UserTeamsResponse) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the given payload.
func (o *UserTeamsResponse) UnmarshalJSON(bytes []byte) (err error) {
all := struct {
- Data []UserTeam `json:"data,omitempty"`
- Links *TeamsResponseLinks `json:"links,omitempty"`
- Meta *TeamsResponseMeta `json:"meta,omitempty"`
+ Data []UserTeam `json:"data,omitempty"`
+ Included []UserTeamIncluded `json:"included,omitempty"`
+ Links *TeamsResponseLinks `json:"links,omitempty"`
+ Meta *TeamsResponseMeta `json:"meta,omitempty"`
}{}
if err = datadog.Unmarshal(bytes, &all); err != nil {
return datadog.Unmarshal(bytes, &o.UnparsedObject)
}
additionalProperties := make(map[string]interface{})
if err = datadog.Unmarshal(bytes, &additionalProperties); err == nil {
- datadog.DeleteKeys(additionalProperties, &[]string{"data", "links", "meta"})
+ datadog.DeleteKeys(additionalProperties, &[]string{"data", "included", "links", "meta"})
} else {
return err
}
hasInvalidField := false
o.Data = all.Data
+ o.Included = all.Included
if all.Links != nil && all.Links.UnparsedObject != nil && o.UnparsedObject == nil {
hasInvalidField = true
}
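With the Included field and the Team variant introduced above, a caller can walk the included resources of a user-teams response and type-switch on the oneOf wrapper. A hedged sketch; the printIncluded helper is illustrative and not part of the client:

    package example

    import (
        "fmt"

        "github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
    )

    // printIncluded lists the users and teams returned alongside team memberships.
    func printIncluded(resp datadogV2.UserTeamsResponse) {
        for _, inc := range resp.GetIncluded() {
            switch v := inc.GetActualInstance().(type) {
            case *datadogV2.User:
                fmt.Println("included user:", v.GetId())
            case *datadogV2.Team:
                fmt.Println("included team:", v.GetId())
            default:
                // The payload matched neither variant (or both); it stays unparsed.
                fmt.Println("unparsed included resource")
            }
        }
    }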
diff --git a/vendor/github.com/DataDog/datadog-api-client-go/v2/version.go b/vendor/github.com/DataDog/datadog-api-client-go/v2/version.go
index 0166d57eaa..2a5f79cad2 100644
--- a/vendor/github.com/DataDog/datadog-api-client-go/v2/version.go
+++ b/vendor/github.com/DataDog/datadog-api-client-go/v2/version.go
@@ -1,4 +1,4 @@
package client
// Version used in User-Agent header.
-const Version = "2.24.0"
+const Version = "2.26.0"
diff --git a/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/.gitignore b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/.gitignore
new file mode 100644
index 0000000000..54aea5c3bf
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/.gitignore
@@ -0,0 +1 @@
+example/example-sds-go
diff --git a/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/LICENSE b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/LICENSE
new file mode 100644
index 0000000000..ec868ff168
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2024-present Datadog, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/build.go b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/build.go
new file mode 100644
index 0000000000..c6fc0dc5bc
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/build.go
@@ -0,0 +1,9 @@
+package sds
+
+/*
+#cgo LDFLAGS: -L../rust/target/release -lsds_go
+
+#include <stdlib.h>
+#include "sds.h"
+*/
+import "C"
diff --git a/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/rule.go b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/rule.go
new file mode 100644
index 0000000000..e15ee23e63
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/rule.go
@@ -0,0 +1,169 @@
+package sds
+
+import (
+ "encoding/json"
+)
+
+type MatchActionType string
+
+const (
+ MatchActionNone = MatchActionType("None")
+ MatchActionRedact = MatchActionType("Redact")
+ MatchActionHash = MatchActionType("Hash")
+ MatchActionPartialRedact = MatchActionType("PartialRedact")
+)
+
+type SecondaryValidator string
+
+const (
+ LuhnChecksum = SecondaryValidator("LuhnChecksum")
+ ChineseIdChecksum = SecondaryValidator("ChineseIdChecksum")
+)
+
+type PartialRedactionDirection string
+
+const (
+ FirstCharacters = PartialRedactionDirection("FirstCharacters")
+ LastCharacters = PartialRedactionDirection("LastCharacters")
+)
+
+// Rule is sent to the core library to create scanners.
+type Rule struct {
+ Id string `json:"id"`
+ Pattern string `json:"pattern"`
+ MatchAction MatchAction `json:"match_action"`
+ ProximityKeywords *ProximityKeywordsConfig `json:"proximity_keywords,omitempty"`
+ SecondaryValidator SecondaryValidator `json:"validator,omitempty"`
+}
+
+// ExtraConfig is used to provide more configuration while creating the rules.
+type ExtraConfig struct {
+ ProximityKeywords *ProximityKeywordsConfig
+ SecondaryValidator SecondaryValidator
+}
+
+// CreateProximityKeywordsConfig creates a ProximityKeywordsConfig.
+func CreateProximityKeywordsConfig(lookAheadCharacterCount uint32, includedKeywords []string, excludedKeywords []string) *ProximityKeywordsConfig {
+ if includedKeywords == nil {
+ includedKeywords = []string{}
+ }
+ if excludedKeywords == nil {
+ excludedKeywords = []string{}
+ }
+ return &ProximityKeywordsConfig{
+ LookAheadCharacterCount: lookAheadCharacterCount,
+ IncludedKeywords: includedKeywords,
+ ExcludedKeywords: excludedKeywords,
+ }
+}
+
+// ProximityKeywordsConfig represents the proximity keyword matching
+// for the core library.
+type ProximityKeywordsConfig struct {
+ LookAheadCharacterCount uint32 `json:"look_ahead_character_count"`
+ IncludedKeywords []string `json:"included_keywords"`
+ ExcludedKeywords []string `json:"excluded_keywords"`
+}
+
+// RuleMatch stores the matches reported by the core library.
+type RuleMatch struct {
+ RuleIdx uint32
+ // TODO(remy): not implemented yet.
+ Path string
+ ReplacementType MatchAction
+ StartIndex uint32
+ EndIndexExclusive uint32
+ ShiftOffset int32
+}
+
+// MatchAction is used to configure the rules.
+type MatchAction struct {
+ Type MatchActionType
+ // used when Type == MatchActionRedact, empty otherwise
+ RedactionValue string
+ // used when Type == MatchActionPartialRedact, empty otherwise
+ CharacterCount uint32
+ // used when Type == MatchActionPartialRedact, empty otherwise
+ Direction PartialRedactionDirection
+}
+
+// NewMatchingRule returns a matching rule with no match _action_.
+func NewMatchingRule(id string, pattern string, extraConfig ExtraConfig) Rule {
+ return Rule{
+ Id: id,
+ Pattern: pattern,
+ MatchAction: MatchAction{
+ Type: MatchActionNone,
+ },
+ ProximityKeywords: extraConfig.ProximityKeywords,
+ SecondaryValidator: extraConfig.SecondaryValidator,
+ }
+}
+
+// NewRedactingRule returns a matching rule redacting events.
+func NewRedactingRule(id string, pattern string, redactionValue string, extraConfig ExtraConfig) Rule {
+ return Rule{
+ Id: id,
+ Pattern: pattern,
+ MatchAction: MatchAction{
+ Type: MatchActionRedact,
+ RedactionValue: redactionValue,
+ },
+ ProximityKeywords: extraConfig.ProximityKeywords,
+ SecondaryValidator: extraConfig.SecondaryValidator,
+ }
+}
+
+// NewHashRule returns a matching rule redacting with hashes.
+func NewHashRule(id string, pattern string, extraConfig ExtraConfig) Rule {
+ return Rule{
+ Id: id,
+ Pattern: pattern,
+ MatchAction: MatchAction{
+ Type: MatchActionHash,
+ },
+ ProximityKeywords: extraConfig.ProximityKeywords,
+ SecondaryValidator: extraConfig.SecondaryValidator,
+ }
+}
+
+// NewPartialRedactRule returns a matching rule partially redacting matches.
+func NewPartialRedactRule(id string, pattern string, characterCount uint32, direction PartialRedactionDirection, extraConfig ExtraConfig) Rule {
+ return Rule{
+ Id: id,
+ Pattern: pattern,
+ MatchAction: MatchAction{
+ Type: MatchActionPartialRedact,
+ CharacterCount: characterCount,
+ Direction: direction,
+ },
+ ProximityKeywords: extraConfig.ProximityKeywords,
+ SecondaryValidator: extraConfig.SecondaryValidator,
+ }
+}
+
+// MarshalJSON marshals the SecondaryValidator.
+func (s SecondaryValidator) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]string{
+ "type": string(s),
+ })
+}
+
+// MarshalJSON marshals the MatchAction in a format understood by the serde rust
+// JSON library.
+func (m MatchAction) MarshalJSON() ([]byte, error) {
+ o := map[string]interface{}{
+ "type": string(m.Type), // serde (rust) will use this field to know what to use for the enum
+ "match_action": string(m.Type),
+ }
+
+ switch m.Type {
+ case MatchActionRedact:
+ o["replacement"] = m.RedactionValue
+ case MatchActionPartialRedact:
+ o["character_count"] = m.CharacterCount
+ o["direction"] = string(m.Direction)
+ }
+
+ return json.Marshal(o)
+}
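Rules are plain data: the constructors above only pick the MatchAction variant and attach the optional extras before the whole rule set is serialized to JSON for the Rust core. A minimal sketch of assembling a rule list; the patterns, keywords, and rule IDs are made-up examples:

    package main

    import (
        "fmt"

        sds "github.com/DataDog/dd-sensitive-data-scanner/sds-go/go"
    )

    func main() {
        // Optional proximity-keyword matching around the regex match.
        extra := sds.ExtraConfig{
            ProximityKeywords: sds.CreateProximityKeywordsConfig(30, []string{"card"}, nil),
        }

        rules := []sds.Rule{
            // Replace the whole match with a fixed placeholder.
            sds.NewRedactingRule("card-number", `\b\d{16}\b`, "[REDACTED]", extra),
            // Partially redact the last 4 characters of the match.
            sds.NewPartialRedactRule("card-last4", `\b\d{16}\b`, 4, sds.LastCharacters, sds.ExtraConfig{}),
        }

        fmt.Println(len(rules), "rules configured")
    }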
diff --git a/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/scanner.go b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/scanner.go
new file mode 100644
index 0000000000..3566c2da14
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/scanner.go
@@ -0,0 +1,345 @@
+package sds
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "unsafe"
+)
+
+/*
+#include <stdlib.h>
+#include "sds.h"
+*/
+import "C"
+
+var (
+ ErrUnknown error = fmt.Errorf("unknown error")
+ ErrInvalidRegex error = fmt.Errorf("invalid regex")
+ ErrInvalidKeywords error = fmt.Errorf("invalid keywords")
+ ErrInvalidMatchAction error = fmt.Errorf("invalid match action")
+)
+
+// Scanner wraps an SDS scanner.
+// See `CreateScanner` to create one providing SDS rules.
+// See `Scan`, `ScanEventsList` or a `ScanEventsMap` for usage.
+type Scanner struct {
+ // Id of this scanner generated by the SDS library when the scanner is created.
+ Id int64
+ // They are stored on creation for read-only usage.
+ Rules []Rule
+}
+
+// ScanResult contains a Scan result.
+type ScanResult struct {
+ // Event contains the event after the scan.
+ // If `Mutated` is true:
+ // * it contains the processed event after redaction.
+ // If `Mutated` is false:
+ // * it contains the original event, unchanged.
+ Event []byte
+
+ scanResult
+}
+
+type scanResult struct {
+ // Mutated indicates if the processed event has been
+ // mutated or not (e.g. redacted).
+ Mutated bool
+ // Matches contains all rule matches if any.
+ Matches []RuleMatch
+}
+
+// CreateScanner creates a scanner in the underlying SDS shared library. The library
+// only returns an ID to then address what scanner to use on Scan calls. This ID is
+// stored in the Scanner Go object for convenience. See `Scan` to process events.
+// The rules used to create the Scanner are stored as read-only information in the
+// returned Scanner.
+func CreateScanner(rules []Rule) (*Scanner, error) {
+ if len(rules) == 0 {
+ return nil, fmt.Errorf("no rules provided")
+ }
+
+ data, err := json.Marshal(rules)
+ if err != nil {
+ return nil, err
+ }
+
+ cdata := C.CString(string(data)) // this call adds the 0, memory has to be freed
+ defer C.free(unsafe.Pointer(cdata))
+
+ var errorString *C.char
+
+ id := C.create_scanner(cdata, &errorString)
+
+ if id < 0 {
+ switch id {
+ // see rust/native/create_scanner.rs for the mapping.
+ case -1: // rust unknown error
+ return nil, ErrUnknown
+ case -2: // rust: CreateScannerError::InvalidRegex
+ return nil, ErrInvalidRegex
+ case -3: // rust: CreateScannerError::InvalidKeywords
+ return nil, ErrInvalidKeywords
+ case -4: // rust: CreateScannerError::InvalidMatchAction
+ return nil, ErrInvalidMatchAction
+ case -5: // rust panic
+ if errorString != nil {
+ defer C.free_string(errorString)
+ return nil, fmt.Errorf("internal panic: %v", C.GoString(errorString))
+ } else {
+ return nil, fmt.Errorf("internal panic")
+ }
+ }
+
+ return nil, ErrUnknown
+ }
+
+ return &Scanner{
+ Id: int64(id),
+ Rules: rules,
+ }, nil
+}
+
+// Delete deletes the instance of the current Scanner.
+// The current Scanner should not be reused.
+func (s *Scanner) Delete() {
+ C.delete_scanner(C.long(s.Id))
+ s.Id = 0
+ s.Rules = nil
+}
+
+func (s *Scanner) scanEncodedEvent(encodedEvent []byte) (ScanResult, error) {
+ cdata := C.CBytes(encodedEvent)
+ defer C.free(cdata)
+
+ var retsize int64
+ var retcap int64
+ var errorString *C.char
+
+ rvdata := C.scan(C.long(s.Id), cdata, C.long(len(encodedEvent)), (*C.long)(unsafe.Pointer(&retsize)), (*C.long)(unsafe.Pointer(&retcap)), &errorString)
+ if errorString != nil {
+ defer C.free_string(errorString)
+ return ScanResult{}, fmt.Errorf("internal panic: %v", C.GoString(errorString))
+ }
+
+ // nothing has matched, ignore the returned object
+ if retsize <= 0 || retcap <= 0 {
+ return ScanResult{}, nil
+ }
+
+ // otherwise we received data initially owned by rust, once we've used it,
+ // use `free_vec` to let know rust it can drop this memory.
+ defer C.free_vec(rvdata, C.long(retsize), C.long(retcap))
+
+ // Note that in the Go 1.21 documentation, GoBytes is part of:
+ // > A few special functions convert between Go and C types by making copies of the data.
+ // Meaning that the data in `response` is a copy owned by Go of what's in rvdata.
+ response := C.GoBytes(unsafe.Pointer(rvdata), C.int(retsize))
+
+ // prepare and return the result
+
+ result, err := decodeResponse(response)
+
+ if err != nil {
+ return ScanResult{}, fmt.Errorf("scan: %v", err)
+ }
+
+ return result, nil
+}
+
+// Scan sends the string event to the SDS shared library for processing.
+func (s *Scanner) Scan(event []byte) (ScanResult, error) {
+ encodedEvent := make([]byte, 0)
+ encodedEvent, err := encodeStringEvent(event, encodedEvent)
+ if err != nil {
+ return ScanResult{}, err
+ }
+
+ var result ScanResult
+ if result, err = s.scanEncodedEvent(encodedEvent); err != nil {
+ return ScanResult{}, err
+ }
+
+ // if not mutated, return the original event.
+ if !result.Mutated {
+ result.Event = event
+ }
+
+ return result, err
+}
+
+// ScanEventsMap sends a map event to the SDS shared library for processing.
+func (s *Scanner) ScanEventsMap(event map[string]interface{}) (ScanResult, error) {
+ encodedEvent := make([]byte, 0)
+ encodedEvent, err := encodeMapEvent(event, encodedEvent)
+ if err != nil {
+ return ScanResult{}, err
+ }
+
+ return s.scanEncodedEvent(encodedEvent)
+}
+
+// ScanEventsList sends a list of events to the SDS shared library for processing.
+func (s *Scanner) ScanEventsList(event []interface{}) (ScanResult, error) {
+ encodedEvent := make([]byte, 0)
+ encodedEvent, err := encodeListEvent(event, encodedEvent)
+ if err != nil {
+ return ScanResult{}, err
+ }
+ return s.scanEncodedEvent(encodedEvent)
+}
+
+// encodeStringEvent encodes the given event to send it to the SDS shared library.
+func encodeStringEvent(log []byte, result []byte) ([]byte, error) {
+ result = append(result, byte(3)) // string data
+ result = binary.BigEndian.AppendUint32(result, uint32(len(log)))
+ result = append(result, log...)
+ return result, nil
+}
+
+func encodeValueRecursive(v interface{}, result []byte) ([]byte, error) {
+ switch v := v.(type) {
+ case string:
+ return encodeStringEvent([]byte(v), result)
+ case map[string]interface{}:
+ return encodeMapEvent(v, result)
+ case []interface{}:
+ return encodeListEvent(v, result)
+ default:
+ return result, fmt.Errorf("encodeValueRecursive: unknown type %T", v)
+ }
+
+}
+
+func encodeMapEvent(event map[string]interface{}, result []byte) ([]byte, error) {
+ for k, v := range event {
+ // push path field
+ result = append(result, 0) // push map type
+ result = binary.BigEndian.AppendUint32(result, uint32(len(k))) // length of the key
+ result = append(result, []byte(k)...) // key
+ var err error = nil
+ result, err = encodeValueRecursive(v, result)
+ if err != nil {
+ return result, err
+ }
+ // pop index
+ result = append(result, 2) // pop path index
+ }
+ return result, nil
+}
+
+func encodeListEvent(log []interface{}, result []byte) ([]byte, error) {
+ for idx, v := range log {
+ // push path field
+ result = append(result, 1) // push index
+ result = binary.BigEndian.AppendUint32(result, uint32(idx)) // index
+ var err error = nil
+ result, err = encodeValueRecursive(v, result)
+ if err != nil {
+ return result, err
+ }
+ // pop index
+ result = append(result, 2) // pop path index
+ }
+ return result, nil
+}
+
+// decodeResponse reads the binary response returned by the SDS shared library
+// on a `scan` call.
+func decodeResponse(rawData []byte) (ScanResult, error) {
+ buf := bytes.NewBuffer(rawData)
+
+ var result ScanResult
+
+ for buf.Len() > 0 {
+ typ, err := buf.ReadByte()
+ if err != nil {
+ return ScanResult{}, fmt.Errorf("decodeResponse: %v", err)
+ }
+
+ switch typ {
+ case 4: // Mutation
+ result.Mutated = true
+ if result.Event, err = decodeMutation(buf); err != nil {
+ return ScanResult{}, fmt.Errorf("decodeResponse: %v", err)
+ }
+ case 5: // Match
+ // starts with a rule ID
+ ruleIdx := binary.BigEndian.Uint32(buf.Next(4))
+
+ // then a path
+ path := decodeString(buf)
+
+ // then a replacement type
+ // TODO(remy): implement me
+ //replacementType := decodeString(buf)
+ decodeString(buf)
+
+ startIndex := binary.BigEndian.Uint32(buf.Next(4))
+ endIndexExclusive := binary.BigEndian.Uint32(buf.Next(4))
+ shiftOffset := int32(binary.BigEndian.Uint32(buf.Next(4)))
+
+ result.Matches = append(result.Matches, RuleMatch{
+ RuleIdx: ruleIdx,
+ Path: string(path),
+ StartIndex: startIndex,
+ EndIndexExclusive: endIndexExclusive,
+ ShiftOffset: shiftOffset,
+ })
+ default:
+ return ScanResult{}, fmt.Errorf("decodeResponse: can't decode response, unknown byte marker: %x", typ)
+ }
+ }
+
+ return result, nil
+}
+
+// decodeString decodes a string using this format:
+// * 4 bytes: string size
+// * string size: the string
+// This method does NOT copy data around but reuses the underlying slice buffer instead.
+// It is best used after a call to `GoBytes`, which takes care of copying
+// the data into the Go world.
+func decodeString(buf *bytes.Buffer) []byte {
+ size := binary.BigEndian.Uint32(buf.Next(4))
+ rv := buf.Next(int(size))
+ return rv
+}
+
+// decodeMutation returns the result of a mutation done by the SDS shared library.
+// TODO(remy): only the redacted/processed event is used, implement what's necessary
+// to return Path/Segment information.
+func decodeMutation(buf *bytes.Buffer) ([]byte, error) {
+ // first, we will be reading a possibly empty path
+ // if we see a '0' byte value, we are reading a field
+ // if we see a '1' byte value, we are reading an index
+ // if we see a '3' byte value, we are not reading a path anymore, but a content string
+ // of the possibly redacted event.
+ done := false
+ var processed []byte
+ for !done {
+ marker, err := buf.ReadByte()
+ if err != nil {
+ return nil, fmt.Errorf("decodeMutation: %v", err)
+ }
+ switch marker {
+ case 0:
+ // reading a field
+ // TODO(remy): not implemented: use the Path/Segments information
+ // and return it in the Go bindings Scan call.
+ decodeString(buf)
+ case 1:
+ // reading an index
+ // TODO(remy): not implemented: use the Path/Segments information
+ // and return it in the Go bindings Scan call.
+ binary.BigEndian.Uint32(buf.Next(4))
+ case 3:
+ // reading content string
+ processed = decodeString(buf)
+ done = true
+ }
+ }
+ return processed, nil
+}
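Putting the pieces together: a scanner is created once from a rule list, reused across Scan calls, and explicitly deleted because the underlying object lives in the Rust shared library (cgo, so this only builds where libsds_go is available). A hedged sketch with a made-up rule and event:

    package main

    import (
        "fmt"

        sds "github.com/DataDog/dd-sensitive-data-scanner/sds-go/go"
    )

    func main() {
        rules := []sds.Rule{
            sds.NewRedactingRule("email", `[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+`, "[EMAIL]", sds.ExtraConfig{}),
        }

        scanner, err := sds.CreateScanner(rules)
        if err != nil {
            panic(err)
        }
        defer scanner.Delete() // frees the scanner held by the shared library

        result, err := scanner.Scan([]byte("contact me at jane.doe@example.com"))
        if err != nil {
            panic(err)
        }
        if result.Mutated {
            fmt.Println(string(result.Event)) // event with the address redacted
        }
        fmt.Println("matches:", len(result.Matches))
    }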
diff --git a/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/sds.h b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/sds.h
new file mode 100644
index 0000000000..28dd0adc96
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/sds.h
@@ -0,0 +1,12 @@
+// From the Go documentation, it's recommended to include stdlib.h if we need
+// to use C.free.
+#include <stdlib.h>
+
+long create_scanner(const char* rules_as_json, const char** error);
+void delete_scanner(long scanner_id);
+
+// event is a non-null terminated TODO
+const char* scan(long scanner_id, const void* event, long event_size, long *retsize, long *retcap, const char** error);
+
+void free_vec(const char* string, long len, long cap);
+void free_string(const char* string);
diff --git a/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/staticcheck.conf b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/staticcheck.conf
new file mode 100644
index 0000000000..f736a4b6f0
--- /dev/null
+++ b/vendor/github.com/DataDog/dd-sensitive-data-scanner/sds-go/go/staticcheck.conf
@@ -0,0 +1,7 @@
+checks = [
+ # go vet checks
+ "asmdecl", "assign", "atomic", "bools", "buildtag", "cgocall", "composites", "copylocks", "errorsas", "framepointer", "httpresponse", "ifaceassert", "loopclosure", "lostcancel", "nilfunc", "printf", "shift", "sigchanyzer", "stdmethods", "stringintconv", "structtag", "tests", "testinggoroutine", "unmarshal", "unreachable", "unsafeptr", "unusedresult",
+
+]
+http_status_code_whitelist = []
+initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS"]
diff --git a/vendor/github.com/DataDog/go-tuf/client/client.go b/vendor/github.com/DataDog/go-tuf/client/client.go
index b364648e7f..6a3e137fda 100644
--- a/vendor/github.com/DataDog/go-tuf/client/client.go
+++ b/vendor/github.com/DataDog/go-tuf/client/client.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/hex"
"encoding/json"
- "errors"
"io"
"github.com/DataDog/go-tuf/data"
@@ -446,55 +445,6 @@ func (c *Client) getLocalMeta() error {
return nil
}
-// getDelegationPathFromRaw verifies a delegated targets against
-// a given snapshot and returns an error if it's invalid
-//
-// Delegation must have targets to get a path, else an empty list
-// will be returned: this is because the delegation iterator is leveraged.
-//
-// Concrete example:
-// targets
-// └── a.json
-// └── b.json
-// └── c.json
-// └── target_file.txt
-//
-// If you try to use that function on "a.json" or "b.json", it'll return an empty list
-// with no error, as neither of them declare a target file
-// On the other hand, if you use that function on "c.json", it'll return & verify
-// [c.json, b.json, a.json]. Running that function on every delegated targets
-// guarantees that if a delegated targets is in the path of a target file, then it will
-// appear at least once in the result
-func (c *Client) getDelegationPathFromRaw(snapshot *data.Snapshot, delegatedTargetsJSON json.RawMessage) ([]string, error) {
- // unmarshal the delegated targets first without verifying as
- // we need at least one targets file name to leverage the
- // getTargetFileMetaDelegationPath method
- s := &data.Signed{}
- if err := json.Unmarshal(delegatedTargetsJSON, s); err != nil {
- return nil, err
- }
- targets := &data.Targets{}
- if err := json.Unmarshal(s.Signed, targets); err != nil {
- return nil, err
- }
- for targetPath := range targets.Targets {
- // Gets target file from remote store
- _, resp, err := c.getTargetFileMetaDelegationPath(targetPath, snapshot)
- // We only need to test one targets file:
- // - If it is valid, it means the delegated targets has been validated
- // - If it is not, the delegated targets isn't valid
- if errors.As(err, &ErrMissingRemoteMetadata{}) {
- // As this function is used to fill the local store cache, the targets
- // will be downloaded from the remote store as the local store cache is
- // empty, meaning that the delegated targets may not exist anymore. In
- // that case, ignore it.
- return nil, nil
- }
- return resp, err
- }
- return nil, nil
-}
-
// loadAndVerifyLocalRootMeta decodes and verifies root metadata from
// local storage and loads the top-level keys. This method first clears
// the DB for top-level keys and then loads the new keys.
@@ -875,10 +825,17 @@ type Destination interface {
// - Size of the download does not match if the reported size is known and
// incorrect
func (c *Client) Download(name string, dest Destination) (err error) {
+ return c.DownloadBatch(map[string]Destination{name: dest})
+}
+
+// DownloadBatch is a batched version of Download.
+func (c *Client) DownloadBatch(targetFiles map[string]Destination) (err error) {
// delete dest if there is an error
defer func() {
if err != nil {
- dest.Delete()
+ for _, dest := range targetFiles {
+ dest.Delete()
+ }
}
}()
@@ -889,17 +846,27 @@ func (c *Client) Download(name string, dest Destination) (err error) {
}
}
- normalizedName := util.NormalizeTarget(name)
- localMeta, ok := c.targets[normalizedName]
- if !ok {
- // search in delegations
- localMeta, err = c.getTargetFileMeta(normalizedName)
+ var names []string
+ for name := range targetFiles {
+ names = append(names, name)
+ }
+ targets, err := c.getTargetFileMetas(names)
+ if err != nil {
+ return err
+ }
+
+ for name, dest := range targetFiles {
+ err := c.download(name, targets[name], dest)
if err != nil {
return err
}
}
+ return nil
+}
+func (c *Client) download(name string, localMeta data.TargetFileMeta, dest Destination) error {
// get the data from remote storage
+ normalizedName := util.NormalizeTarget(name)
r, size, err := c.downloadTarget(normalizedName, c.remote.GetTarget, localMeta.Hashes)
if err != nil {
return err
@@ -927,7 +894,6 @@ func (c *Client) Download(name string, dest Destination) (err error) {
}
return ErrDownloadFailed{name, err}
}
-
return nil
}
@@ -958,16 +924,23 @@ func (c *Client) VerifyDigest(digest string, digestAlg string, length int64, pat
// exists, searching from top-level targets then through
// all delegations. If it does not, ErrNotFound will be returned.
func (c *Client) Target(name string) (data.TargetFileMeta, error) {
- target, err := c.getTargetFileMeta(util.NormalizeTarget(name))
- if err == nil {
- return target, nil
+ targets, err := c.TargetBatch([]string{name})
+ if err != nil {
+ return data.TargetFileMeta{}, err
}
+ return targets[name], nil
+}
+// TargetBatch is a batched version of Target.
+func (c *Client) TargetBatch(names []string) (data.TargetFiles, error) {
+ targets, err := c.getTargetFileMetas(names)
+ if err == nil {
+ return targets, nil
+ }
if _, ok := err.(ErrUnknownTarget); ok {
- return data.TargetFileMeta{}, ErrNotFound{name}
+ return nil, ErrNotFound{err.(ErrUnknownTarget).Name}
}
-
- return data.TargetFileMeta{}, err
+ return nil, err
}
// Targets returns the complete list of available top-level targets.
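The hunks above replace the single-target `Download` and `Target` paths with batched variants that resolve every requested name against one snapshot load and, on any failure, delete every destination in the batch. Below is a minimal usage sketch, assuming the usual go-tuf client constructors (`MemoryLocalStore`, `HTTPRemoteStore`, `NewClient`) and an example repository URL; the in-memory `bufferDestination` helper is hypothetical and exists only to satisfy the `Destination` interface.

```go
package main

import (
	"bytes"
	"fmt"

	tuf "github.com/DataDog/go-tuf/client"
)

// bufferDestination is a hypothetical in-memory Destination used only for illustration.
type bufferDestination struct{ bytes.Buffer }

func (d *bufferDestination) Delete() error { d.Reset(); return nil }

func main() {
	// Example repository URL; any reachable TUF repository would do.
	remote, err := tuf.HTTPRemoteStore("https://example.com/tuf-repo", nil, nil)
	if err != nil {
		panic(err)
	}
	c := tuf.NewClient(tuf.MemoryLocalStore(), remote)
	// In real use the client would be initialized with trusted root metadata
	// and updated before downloading; those steps are omitted here.

	// DownloadBatch resolves every target against one snapshot load and,
	// on any error, calls Delete() on every destination in the map.
	dests := map[string]tuf.Destination{
		"a/file1.txt": &bufferDestination{},
		"a/file2.txt": &bufferDestination{},
	}
	if err := c.DownloadBatch(dests); err != nil {
		fmt.Println("batch download failed:", err)
	}
}
```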
diff --git a/vendor/github.com/DataDog/go-tuf/client/delegations.go b/vendor/github.com/DataDog/go-tuf/client/delegations.go
index 4cf5404554..96f10bbdfa 100644
--- a/vendor/github.com/DataDog/go-tuf/client/delegations.go
+++ b/vendor/github.com/DataDog/go-tuf/client/delegations.go
@@ -3,28 +3,69 @@ package client
import (
"github.com/DataDog/go-tuf/data"
"github.com/DataDog/go-tuf/pkg/targets"
+ "github.com/DataDog/go-tuf/util"
"github.com/DataDog/go-tuf/verify"
)
+type delegatedTargetsCache struct {
+ meta map[string]*data.Targets
+}
+
+func newDelegatedTargetsCache() *delegatedTargetsCache {
+ return &delegatedTargetsCache{
+ meta: make(map[string]*data.Targets),
+ }
+}
+
+func (c *delegatedTargetsCache) loadDelegatedTargets(client *Client, snapshot *data.Snapshot, role string, db *verify.DB) (*data.Targets, error) {
+ if t, ok := c.meta[role]; ok {
+ return t, nil
+ }
+
+ targets, err := client.loadDelegatedTargets(snapshot, role, db)
+ if err != nil {
+ return nil, err
+ }
+
+ c.meta[role] = targets
+ return targets, nil
+}
+
// getTargetFileMeta searches for a verified TargetFileMeta matching a target
// Requires a local snapshot to be loaded and is locked to the snapshot versions.
func (c *Client) getTargetFileMeta(target string) (data.TargetFileMeta, error) {
- snapshot, err := c.loadLocalSnapshot()
+ metas, err := c.getTargetFileMetas([]string{target})
if err != nil {
return data.TargetFileMeta{}, err
}
+ return metas[target], nil
+}
- targetFileMeta, _, err := c.getTargetFileMetaDelegationPath(target, snapshot)
+func (c *Client) getTargetFileMetas(targets []string) (data.TargetFiles, error) {
+ snapshot, err := c.loadLocalSnapshot()
if err != nil {
- return data.TargetFileMeta{}, err
+ return nil, err
+ }
+ cache := newDelegatedTargetsCache()
+ targetFileMetas := make(data.TargetFiles, len(targets))
+ for _, target := range targets {
+ normalizedTarget := util.NormalizeTarget(target)
+ targetFileMeta, _, err := c.getTargetFileMetaDelegationPath(normalizedTarget, snapshot, cache)
+ if _, ok := err.(ErrUnknownTarget); ok {
+ return nil, ErrUnknownTarget{target, snapshot.Version}
+ }
+ if err != nil {
+ return nil, err
+ }
+ targetFileMetas[target] = targetFileMeta
}
- return targetFileMeta, nil
+ return targetFileMetas, nil
}
// getTargetFileMetaDelegationPath searches for a verified TargetFileMeta matching a target
// Requires a snapshot to be passed and is locked to that specific snapshot's versions.
// Searches through delegated targets following TUF spec 1.0.19 section 5.6.
-func (c *Client) getTargetFileMetaDelegationPath(target string, snapshot *data.Snapshot) (data.TargetFileMeta, []string, error) {
+func (c *Client) getTargetFileMetaDelegationPath(target string, snapshot *data.Snapshot, cache *delegatedTargetsCache) (data.TargetFileMeta, []string, error) {
// delegationsIterator covers 5.6.7
// - pre-order depth-first search starting with the top targets
// - filter delegations with paths or path_hash_prefixes matching searched target
@@ -45,7 +86,7 @@ func (c *Client) getTargetFileMetaDelegationPath(target string, snapshot *data.S
}
// covers 5.6.{1,2,3,4,5,6}
- targets, err := c.loadDelegatedTargets(snapshot, d.Delegatee.Name, d.DB)
+ targets, err := cache.loadDelegatedTargets(c, snapshot, d.Delegatee.Name, d.DB)
if err != nil {
return data.TargetFileMeta{}, nil, err
}
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/gohai/gohai.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/gohai/gohai.go
index 618419f92e..20c05dbbc6 100644
--- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/gohai/gohai.go
+++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/gohai/gohai.go
@@ -82,17 +82,29 @@ type Payload struct {
}
// Platform returns a reference to the Gohai payload 'platform' map.
+// It initializes the field if nil.
func (p *Payload) Platform() map[string]string {
+ if p.Gohai.Gohai.Platform == nil {
+ p.Gohai.Gohai.Platform = map[string]string{}
+ }
return p.Gohai.Gohai.Platform
}
// CPU returns a reference to the Gohai payload 'cpu' map.
+// It initializes the field if nil.
func (p *Payload) CPU() map[string]string {
+ if p.Gohai.Gohai.CPU == nil {
+ p.Gohai.Gohai.CPU = map[string]string{}
+ }
return p.Gohai.Gohai.CPU
}
// Network returns a reference to the Gohai payload 'network' map.
+// It initializes the field if nil.
func (p *Payload) Network() map[string]any {
+ if p.Gohai.Gohai.Network == nil {
+ p.Gohai.Gohai.Network = map[string]any{}
+ }
return p.Gohai.Gohai.Network
}
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/payload/payload.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/payload/payload.go
index eecffbd3b3..c39a1350a1 100644
--- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/payload/payload.go
+++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/payload/payload.go
@@ -77,3 +77,15 @@ type Meta struct {
// HostAliases are other available host names
HostAliases []string `json:"host_aliases,omitempty"`
}
+
+// NewEmpty creates a new HostMetadata with empty fields.
+// Pointer fields are initialized to empty structs.
+// All other fields are initialized to their zero value.
+func NewEmpty() HostMetadata {
+ return HostMetadata{
+ Meta: &Meta{},
+ Tags: &HostTags{},
+ Payload: gohai.NewEmpty(),
+ Processes: &gohai.ProcessesPayload{},
+ }
+}
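Taken together, the nil-guarding Gohai accessors above and the new `payload.NewEmpty` constructor let callers populate host metadata without tripping over nil maps or nil pointer fields. A minimal sketch of that usage follows, assuming the field layout shown in this patch; the `hostname` key and its value are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/payload"
)

func main() {
	// NewEmpty initializes the pointer fields, and the Gohai accessors
	// allocate their maps on first use, so this write cannot panic on a nil map.
	hm := payload.NewEmpty()
	hm.Payload.Platform()["hostname"] = "my-host" // illustrative key/value
	fmt.Println(hm.Payload.Platform())
}
```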
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/reporter.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/reporter.go
index 26107990b8..1561ac8e43 100644
--- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/reporter.go
+++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata/reporter.go
@@ -109,7 +109,10 @@ func (r *Reporter) pushAndLog(ctx context.Context, hm payload.HostMetadata) {
func (r *Reporter) hostname(res pcommon.Resource) (string, bool) {
src, ok := attributes.SourceFromAttrs(res.Attributes())
if !ok {
- r.logger.Warn("resource does not have host-identifying attributes", zap.Any("attributes", res.Attributes()))
+ r.logger.Warn("resource does not have host-identifying attributes",
+ zap.Any("attributes", res.Attributes().AsRaw()),
+ zap.String("further info", "https://docs.datadoghq.com/opentelemetry/schema_semantics/hostname/?tab=datadogexporter"),
+ )
return "", false
}
if src.Kind != source.HostnameKind {
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/attributes.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/attributes.go
index 9b305c868c..d5152362d0 100644
--- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/attributes.go
+++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/attributes.go
@@ -92,6 +92,81 @@ var (
"app.kubernetes.io/part-of": "kube_app_part_of",
"app.kubernetes.io/managed-by": "kube_app_managed_by",
}
+
+ // Kubernetes out of the box Datadog tags
+ // https://docs.datadoghq.com/containers/kubernetes/tag/?tab=containerizedagent#out-of-the-box-tags
+ // https://github.com/DataDog/datadog-agent/blob/d33d042d6786e8b85f72bb627fbf06ad8a658031/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go
+ // Note: if any OTel semantics happen to overlap with these tag names, they will also be added as Datadog tags.
+ kubernetesDDTags = map[string]struct{}{
+ "architecture": {},
+ "availability-zone": {},
+ "chronos_job": {},
+ "chronos_job_owner": {},
+ "cluster_name": {},
+ "container_id": {},
+ "container_name": {},
+ "dd_remote_config_id": {},
+ "dd_remote_config_rev": {},
+ "display_container_name": {},
+ "docker_image": {},
+ "ecs_cluster_name": {},
+ "ecs_container_name": {},
+ "eks_fargate_node": {},
+ "env": {},
+ "git.commit.sha": {},
+ "git.repository_url": {},
+ "image_id": {},
+ "image_name": {},
+ "image_tag": {},
+ "kube_app_component": {},
+ "kube_app_instance": {},
+ "kube_app_managed_by": {},
+ "kube_app_name": {},
+ "kube_app_part_of": {},
+ "kube_app_version": {},
+ "kube_container_name": {},
+ "kube_cronjob": {},
+ "kube_daemon_set": {},
+ "kube_deployment": {},
+ "kube_job": {},
+ "kube_namespace": {},
+ "kube_ownerref_kind": {},
+ "kube_ownerref_name": {},
+ "kube_priority_class": {},
+ "kube_qos": {},
+ "kube_replica_set": {},
+ "kube_replication_controller": {},
+ "kube_service": {},
+ "kube_stateful_set": {},
+ "language": {},
+ "marathon_app": {},
+ "mesos_task": {},
+ "nomad_dc": {},
+ "nomad_group": {},
+ "nomad_job": {},
+ "nomad_namespace": {},
+ "nomad_task": {},
+ "oshift_deployment": {},
+ "oshift_deployment_config": {},
+ "os_name": {},
+ "os_version": {},
+ "persistentvolumeclaim": {},
+ "pod_name": {},
+ "pod_phase": {},
+ "rancher_container": {},
+ "rancher_service": {},
+ "rancher_stack": {},
+ "region": {},
+ "service": {},
+ "short_image": {},
+ "swarm_namespace": {},
+ "swarm_service": {},
+ "task_name": {},
+ "task_family": {},
+ "task_version": {},
+ "task_arn": {},
+ "version": {},
+ }
)
// TagsFromAttributes converts a selected list of attributes
@@ -132,6 +207,11 @@ func TagsFromAttributes(attrs pcommon.Map) []string {
if datadogKey, found := kubernetesMapping[key]; found && value.Str() != "" {
tags = append(tags, fmt.Sprintf("%s:%s", datadogKey, value.Str()))
}
+
+ // Kubernetes DD tags
+ if _, found := kubernetesDDTags[key]; found {
+ tags = append(tags, fmt.Sprintf("%s:%s", key, value.Str()))
+ }
return true
})
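The new `kubernetesDDTags` allowlist passes attributes whose keys already match Datadog's out-of-the-box Kubernetes tags straight through as `key:value` tags, alongside the existing `kubernetesMapping` renames. A rough sketch of the resulting behaviour, under the assumption that only these three attributes are present; tag order is not guaranteed.

```go
package main

import (
	"fmt"

	"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	attrs := pcommon.NewMap()
	attrs.PutStr("kube_cronjob", "nightly-backup")        // in the allowlist: passed through as-is
	attrs.PutStr("app.kubernetes.io/part-of", "checkout") // renamed via kubernetesMapping to kube_app_part_of
	attrs.PutStr("some.other.attribute", "ignored")       // not tagged

	// Expected, roughly: [kube_cronjob:nightly-backup kube_app_part_of:checkout]
	fmt.Println(attributes.TagsFromAttributes(attrs))
}
```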
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/consumer.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/consumer.go
index 56e023f3f2..1c7c3a6774 100644
--- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/consumer.go
+++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/consumer.go
@@ -88,9 +88,9 @@ type SketchConsumer interface {
type Consumer interface {
TimeSeriesConsumer
SketchConsumer
- APMStatsConsumer
}
+// Deprecated: use WithStatsOut instead
// APMStatsConsumer implementations are able to consume APM Stats generated by
// a Translator.
type APMStatsConsumer interface {
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/dimensions.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/dimensions.go
index 13ce034802..8187607e7a 100644
--- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/dimensions.go
+++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/dimensions.go
@@ -37,9 +37,9 @@ type Dimensions struct {
host string
originID string
- originProduct OriginProduct
- originCategory OriginCategory
- originService OriginService
+ originProduct OriginProduct
+ originSubProduct OriginSubProduct
+ originProductDetail OriginProductDetail
}
// Name of the metric.
@@ -67,14 +67,14 @@ func (d *Dimensions) OriginProduct() OriginProduct {
return d.originProduct
}
-// OriginCategory of the metric.
-func (d *Dimensions) OriginCategory() OriginCategory {
- return d.originCategory
+// OriginSubProduct of the metric.
+func (d *Dimensions) OriginSubProduct() OriginSubProduct {
+ return d.originSubProduct
}
-// OriginService of the metric.
-func (d *Dimensions) OriginService() OriginService {
- return d.originService
+// OriginProductDetail of the metric.
+func (d *Dimensions) OriginProductDetail() OriginProductDetail {
+ return d.originProductDetail
}
// getTags maps an attributeMap into a slice of Datadog tags
@@ -95,13 +95,13 @@ func (d *Dimensions) AddTags(tags ...string) *Dimensions {
newTags = append(newTags, tags...)
newTags = append(newTags, d.tags...)
return &Dimensions{
- name: d.name,
- tags: newTags,
- host: d.host,
- originID: d.originID,
- originProduct: d.originProduct,
- originCategory: d.originCategory,
- originService: d.originService,
+ name: d.name,
+ tags: newTags,
+ host: d.host,
+ originID: d.originID,
+ originProduct: d.originProduct,
+ originSubProduct: d.originSubProduct,
+ originProductDetail: d.originProductDetail,
}
}
@@ -113,13 +113,13 @@ func (d *Dimensions) WithAttributeMap(labels pcommon.Map) *Dimensions {
// WithSuffix creates a new dimensions struct with an extra name suffix.
func (d *Dimensions) WithSuffix(suffix string) *Dimensions {
return &Dimensions{
- name: fmt.Sprintf("%s.%s", d.name, suffix),
- host: d.host,
- tags: d.tags,
- originID: d.originID,
- originProduct: d.originProduct,
- originCategory: d.originCategory,
- originService: d.originService,
+ name: fmt.Sprintf("%s.%s", d.name, suffix),
+ host: d.host,
+ tags: d.tags,
+ originID: d.originID,
+ originProduct: d.originProduct,
+ originSubProduct: d.originSubProduct,
+ originProductDetail: d.originProductDetail,
}
}
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/metrics_translator.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/metrics_translator.go
index f0d48a74cb..70e4bc506f 100644
--- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/metrics_translator.go
+++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/metrics_translator.go
@@ -151,6 +151,11 @@ func (t *Translator) mapNumberMetrics(
for i := 0; i < slice.Len(); i++ {
p := slice.At(i)
+ if p.Flags().NoRecordedValue() {
+ // No recorded value, skip.
+ continue
+ }
+
pointDims := dims.WithAttributeMap(p.Attributes())
var val float64
switch p.ValueType() {
@@ -203,6 +208,11 @@ func (t *Translator) mapNumberMonotonicMetrics(
) {
for i := 0; i < slice.Len(); i++ {
p := slice.At(i)
+ if p.Flags().NoRecordedValue() {
+ // No recorded value, skip.
+ continue
+ }
+
ts := uint64(p.Timestamp())
startTs := uint64(p.StartTimestamp())
pointDims := dims.WithAttributeMap(p.Attributes())
@@ -450,6 +460,11 @@ func (t *Translator) mapHistogramMetrics(
) {
for i := 0; i < slice.Len(); i++ {
p := slice.At(i)
+ if p.Flags().NoRecordedValue() {
+ // No recorded value, skip.
+ continue
+ }
+
startTs := uint64(p.StartTimestamp())
ts := uint64(p.Timestamp())
pointDims := dims.WithAttributeMap(p.Attributes())
@@ -554,6 +569,11 @@ func (t *Translator) mapSummaryMetrics(
for i := 0; i < slice.Len(); i++ {
p := slice.At(i)
+ if p.Flags().NoRecordedValue() {
+ // No recorded value, skip.
+ continue
+ }
+
startTs := uint64(p.StartTimestamp())
ts := uint64(p.Timestamp())
pointDims := dims.WithAttributeMap(p.Attributes())
@@ -713,15 +733,6 @@ func (t *Translator) MapMetrics(ctx context.Context, md pmetric.Metrics, consume
rms := md.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
rm := rms.At(i)
- if v, ok := rm.Resource().Attributes().Get(keyAPMStats); ok && v.Bool() {
- // these resource metrics are an APM Stats payload; consume it as such
- sp, err := t.statsPayloadFromMetrics(rm)
- if err != nil {
- return metadata, fmt.Errorf("error extracting APM Stats from Metrics: %w", err)
- }
- consumer.ConsumeAPMStats(sp)
- continue
- }
src, err := t.source(ctx, rm.Resource())
if err != nil {
return metadata, err
@@ -808,13 +819,13 @@ func (t *Translator) MapMetrics(ctx context.Context, md pmetric.Metrics, consume
func (t *Translator) mapToDDFormat(ctx context.Context, md pmetric.Metric, consumer Consumer, additionalTags []string, host string, scopeName string, rattrs pcommon.Map) {
baseDims := &Dimensions{
- name: md.Name(),
- tags: additionalTags,
- host: host,
- originID: attributes.OriginIDFromAttributes(rattrs),
- originProduct: t.cfg.originProduct,
- originCategory: OriginCategoryOTLP,
- originService: originServiceFromScopeName(scopeName),
+ name: md.Name(),
+ tags: additionalTags,
+ host: host,
+ originID: attributes.OriginIDFromAttributes(rattrs),
+ originProduct: t.cfg.originProduct,
+ originSubProduct: OriginSubProductOTLP,
+ originProductDetail: originProductDetailFromScopeName(scopeName),
}
switch md.Type() {
case pmetric.MetricTypeGauge:
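Each of the mapping loops above now skips data points whose `NoRecordedValue` flag is set. The sketch below shows how such a point is marked when building pdata, to make clear what the new checks filter out; the metric name is hypothetical.

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("example.gauge") // hypothetical metric name
	dp := m.SetEmptyGauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetDoubleValue(42)

	// Mark the point as carrying no recorded value; the checks added in this
	// patch make the translator skip such points instead of exporting them.
	dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true))

	fmt.Println("skip this point:", dp.Flags().NoRecordedValue()) // true
}
```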
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/origin.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/origin.go
index 9abbca7ace..deb37e8a0f 100644
--- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/origin.go
+++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/origin.go
@@ -26,6 +26,8 @@ const (
OriginProductUnknown OriginProduct = 0
// OriginProductDatadogAgent is the origin for metrics coming from the Datadog Agent OTLP Ingest.
OriginProductDatadogAgent OriginProduct = 10
+ // OriginProductDatadogExporter is the origin for metrics coming from the OpenTelemetry Collector Datadog Exporter.
+ OriginProductDatadogExporter OriginProduct = 19
)
func (o OriginProduct) String() string {
@@ -34,81 +36,83 @@ func (o OriginProduct) String() string {
return "unknown"
case OriginProductDatadogAgent:
return "datadog-agent"
+ case OriginProductDatadogExporter:
+ return "datadog-exporter"
default:
return fmt.Sprintf("OriginProduct(%d)", o)
}
}
-// OriginCategory defines the origin category.
-type OriginCategory int32
+// OriginSubProduct defines the origin subproduct.
+type OriginSubProduct int32
-// OriginCategoryOTLP is the origin category for all metrics coming from OTLP.
-// All metrics produced by the translator MUST have origin category set to OTLP.
-const OriginCategoryOTLP OriginCategory = 17
+// OriginSubProductOTLP is the origin subproduct for all metrics coming from OTLP.
+// All metrics produced by the translator MUST have origin subproduct set to OTLP.
+const OriginSubProductOTLP OriginSubProduct = 17
-func (o OriginCategory) String() string {
+func (o OriginSubProduct) String() string {
switch o {
- case OriginCategoryOTLP:
+ case OriginSubProductOTLP:
return "otlp"
default:
- return fmt.Sprintf("OriginCategory(%d)", o)
+ return fmt.Sprintf("OriginSubProduct(%d)", o)
}
}
-// OriginService defines the origin service.
-type OriginService int32
+// OriginProductDetail defines the origin product detail.
+type OriginProductDetail int32
// List all receivers that set the scope name.
const (
- OriginServiceUnknown OriginService = 0
- OriginServiceActiveDirectoryDSReceiver OriginService = 251
- OriginServiceAerospikeReceiver OriginService = 252
- OriginServiceApacheReceiver OriginService = 253
- OriginServiceApacheSparkReceiver OriginService = 254
- OriginServiceAzureMonitorReceiver OriginService = 255
- OriginServiceBigIPReceiver OriginService = 256
- OriginServiceChronyReceiver OriginService = 257
- OriginServiceCouchDBReceiver OriginService = 258
- OriginServiceDockerStatsReceiver OriginService = 217
- OriginServiceElasticsearchReceiver OriginService = 218
- OriginServiceExpVarReceiver OriginService = 219
- OriginServiceFileStatsReceiver OriginService = 220
- OriginServiceFlinkMetricsReceiver OriginService = 221
- OriginServiceGitProviderReceiver OriginService = 222
- OriginServiceHAProxyReceiver OriginService = 223
- OriginServiceHostMetricsReceiver OriginService = 224
- OriginServiceHTTPCheckReceiver OriginService = 225
- OriginServiceIISReceiver OriginService = 226
- OriginServiceK8SClusterReceiver OriginService = 227
- OriginServiceKafkaMetricsReceiver OriginService = 228
- OriginServiceKubeletStatsReceiver OriginService = 229
- OriginServiceMemcachedReceiver OriginService = 230
- OriginServiceMongoDBAtlasReceiver OriginService = 231
- OriginServiceMongoDBReceiver OriginService = 232
- OriginServiceMySQLReceiver OriginService = 233
- OriginServiceNginxReceiver OriginService = 234
- OriginServiceNSXTReceiver OriginService = 235
- OriginServiceOracleDBReceiver OriginService = 236
- OriginServicePostgreSQLReceiver OriginService = 237
- OriginServicePrometheusReceiver OriginService = 238
- OriginServiceRabbitMQReceiver OriginService = 239
- OriginServiceRedisReceiver OriginService = 240
- OriginServiceRiakReceiver OriginService = 241
- OriginServiceSAPHANAReceiver OriginService = 242
- OriginServiceSNMPReceiver OriginService = 243
- OriginServiceSnowflakeReceiver OriginService = 244
- OriginServiceSplunkEnterpriseReceiver OriginService = 245
- OriginServiceSQLServerReceiver OriginService = 246
- OriginServiceSSHCheckReceiver OriginService = 247
- OriginServiceStatsDReceiver OriginService = 248
- OriginServiceVCenterReceiver OriginService = 249
- OriginServiceZookeeperReceiver OriginService = 250
+ OriginProductDetailUnknown OriginProductDetail = 0
+ OriginProductDetailActiveDirectoryDSReceiver OriginProductDetail = 251
+ OriginProductDetailAerospikeReceiver OriginProductDetail = 252
+ OriginProductDetailApacheReceiver OriginProductDetail = 253
+ OriginProductDetailApacheSparkReceiver OriginProductDetail = 254
+ OriginProductDetailAzureMonitorReceiver OriginProductDetail = 255
+ OriginProductDetailBigIPReceiver OriginProductDetail = 256
+ OriginProductDetailChronyReceiver OriginProductDetail = 257
+ OriginProductDetailCouchDBReceiver OriginProductDetail = 258
+ OriginProductDetailDockerStatsReceiver OriginProductDetail = 217
+ OriginProductDetailElasticsearchReceiver OriginProductDetail = 218
+ OriginProductDetailExpVarReceiver OriginProductDetail = 219
+ OriginProductDetailFileStatsReceiver OriginProductDetail = 220
+ OriginProductDetailFlinkMetricsReceiver OriginProductDetail = 221
+ OriginProductDetailGitProviderReceiver OriginProductDetail = 222
+ OriginProductDetailHAProxyReceiver OriginProductDetail = 223
+ OriginProductDetailHostMetricsReceiver OriginProductDetail = 224
+ OriginProductDetailHTTPCheckReceiver OriginProductDetail = 225
+ OriginProductDetailIISReceiver OriginProductDetail = 226
+ OriginProductDetailK8SClusterReceiver OriginProductDetail = 227
+ OriginProductDetailKafkaMetricsReceiver OriginProductDetail = 228
+ OriginProductDetailKubeletStatsReceiver OriginProductDetail = 229
+ OriginProductDetailMemcachedReceiver OriginProductDetail = 230
+ OriginProductDetailMongoDBAtlasReceiver OriginProductDetail = 231
+ OriginProductDetailMongoDBReceiver OriginProductDetail = 232
+ OriginProductDetailMySQLReceiver OriginProductDetail = 233
+ OriginProductDetailNginxReceiver OriginProductDetail = 234
+ OriginProductDetailNSXTReceiver OriginProductDetail = 235
+ OriginProductDetailOracleDBReceiver OriginProductDetail = 236
+ OriginProductDetailPostgreSQLReceiver OriginProductDetail = 237
+ OriginProductDetailPrometheusReceiver OriginProductDetail = 238
+ OriginProductDetailRabbitMQReceiver OriginProductDetail = 239
+ OriginProductDetailRedisReceiver OriginProductDetail = 240
+ OriginProductDetailRiakReceiver OriginProductDetail = 241
+ OriginProductDetailSAPHANAReceiver OriginProductDetail = 242
+ OriginProductDetailSNMPReceiver OriginProductDetail = 243
+ OriginProductDetailSnowflakeReceiver OriginProductDetail = 244
+ OriginProductDetailSplunkEnterpriseReceiver OriginProductDetail = 245
+ OriginProductDetailSQLServerReceiver OriginProductDetail = 246
+ OriginProductDetailSSHCheckReceiver OriginProductDetail = 247
+ OriginProductDetailStatsDReceiver OriginProductDetail = 248
+ OriginProductDetailVCenterReceiver OriginProductDetail = 249
+ OriginProductDetailZookeeperReceiver OriginProductDetail = 250
)
-func originServiceFromScopeName(scopeName string) OriginService {
+func originProductDetailFromScopeName(scopeName string) OriginProductDetail {
const collectorPrefix = "otelcol/"
if !strings.HasPrefix(scopeName, collectorPrefix) {
- return OriginServiceUnknown
+ return OriginProductDetailUnknown
}
// otelcol/kubeletstatsreceiver -> kubeletstatsreceiver
@@ -118,90 +122,90 @@ func originServiceFromScopeName(scopeName string) OriginService {
// otelcol
switch receiverName {
case "activedirectorydsreceiver":
- return OriginServiceActiveDirectoryDSReceiver
+ return OriginProductDetailActiveDirectoryDSReceiver
case "aerospikereceiver":
- return OriginServiceAerospikeReceiver
+ return OriginProductDetailAerospikeReceiver
case "apachereceiver":
- return OriginServiceApacheReceiver
+ return OriginProductDetailApacheReceiver
case "apachesparkreceiver":
- return OriginServiceApacheSparkReceiver
+ return OriginProductDetailApacheSparkReceiver
case "azuremonitorreceiver":
- return OriginServiceAzureMonitorReceiver
+ return OriginProductDetailAzureMonitorReceiver
case "bigipreceiver":
- return OriginServiceBigIPReceiver
+ return OriginProductDetailBigIPReceiver
case "chronyreceiver":
- return OriginServiceChronyReceiver
+ return OriginProductDetailChronyReceiver
case "couchdbreceiver":
- return OriginServiceCouchDBReceiver
+ return OriginProductDetailCouchDBReceiver
case "dockerstatsreceiver":
- return OriginServiceDockerStatsReceiver
+ return OriginProductDetailDockerStatsReceiver
case "elasticsearchreceiver":
- return OriginServiceElasticsearchReceiver
+ return OriginProductDetailElasticsearchReceiver
case "expvarreceiver":
- return OriginServiceExpVarReceiver
+ return OriginProductDetailExpVarReceiver
case "filestatsreceiver":
- return OriginServiceFileStatsReceiver
+ return OriginProductDetailFileStatsReceiver
case "flinkmetricsreceiver":
- return OriginServiceFlinkMetricsReceiver
+ return OriginProductDetailFlinkMetricsReceiver
case "gitproviderreceiver":
- return OriginServiceGitProviderReceiver
+ return OriginProductDetailGitProviderReceiver
case "haproxyreceiver":
- return OriginServiceHAProxyReceiver
+ return OriginProductDetailHAProxyReceiver
case "hostmetricsreceiver":
- return OriginServiceHostMetricsReceiver
+ return OriginProductDetailHostMetricsReceiver
case "httpcheckreceiver":
- return OriginServiceHTTPCheckReceiver
+ return OriginProductDetailHTTPCheckReceiver
case "iisreceiver":
- return OriginServiceIISReceiver
+ return OriginProductDetailIISReceiver
case "k8sclusterreceiver":
- return OriginServiceK8SClusterReceiver
+ return OriginProductDetailK8SClusterReceiver
case "kafkametricsreceiver":
- return OriginServiceKafkaMetricsReceiver
+ return OriginProductDetailKafkaMetricsReceiver
case "kubeletstatsreceiver":
- return OriginServiceKubeletStatsReceiver
+ return OriginProductDetailKubeletStatsReceiver
case "memcachedreceiver":
- return OriginServiceMemcachedReceiver
+ return OriginProductDetailMemcachedReceiver
case "mongodbatlasreceiver":
- return OriginServiceMongoDBAtlasReceiver
+ return OriginProductDetailMongoDBAtlasReceiver
case "mongodbreceiver":
- return OriginServiceMongoDBReceiver
+ return OriginProductDetailMongoDBReceiver
case "mysqlreceiver":
- return OriginServiceMySQLReceiver
+ return OriginProductDetailMySQLReceiver
case "nginxreceiver":
- return OriginServiceNginxReceiver
+ return OriginProductDetailNginxReceiver
case "nsxtreceiver":
- return OriginServiceNSXTReceiver
+ return OriginProductDetailNSXTReceiver
case "oracledbreceiver":
- return OriginServiceOracleDBReceiver
+ return OriginProductDetailOracleDBReceiver
case "postgresqlreceiver":
- return OriginServicePostgreSQLReceiver
+ return OriginProductDetailPostgreSQLReceiver
case "prometheusreceiver":
- return OriginServicePrometheusReceiver
+ return OriginProductDetailPrometheusReceiver
case "rabbitmqreceiver":
- return OriginServiceRabbitMQReceiver
+ return OriginProductDetailRabbitMQReceiver
case "redisreceiver":
- return OriginServiceRedisReceiver
+ return OriginProductDetailRedisReceiver
case "riakreceiver":
- return OriginServiceRiakReceiver
+ return OriginProductDetailRiakReceiver
case "saphanareceiver":
- return OriginServiceSAPHANAReceiver
+ return OriginProductDetailSAPHANAReceiver
case "snmpreceiver":
- return OriginServiceSNMPReceiver
+ return OriginProductDetailSNMPReceiver
case "snowflakereceiver":
- return OriginServiceSnowflakeReceiver
+ return OriginProductDetailSnowflakeReceiver
case "splunkenterprisereceiver":
- return OriginServiceSplunkEnterpriseReceiver
+ return OriginProductDetailSplunkEnterpriseReceiver
case "sqlserverreceiver":
- return OriginServiceSQLServerReceiver
+ return OriginProductDetailSQLServerReceiver
case "sshcheckreceiver":
- return OriginServiceSSHCheckReceiver
+ return OriginProductDetailSSHCheckReceiver
case "statsdreceiver":
- return OriginServiceStatsDReceiver
+ return OriginProductDetailStatsDReceiver
case "vcenterreceiver":
- return OriginServiceVCenterReceiver
+ return OriginProductDetailVCenterReceiver
case "zookeeperreceiver":
- return OriginServiceZookeeperReceiver
+ return OriginProductDetailZookeeperReceiver
}
- return OriginServiceUnknown
+ return OriginProductDetailUnknown
}
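The origin fields are renamed from category/service to subproduct/product detail, and a dedicated `OriginProductDatadogExporter` product is added. A small sketch printing the new identifiers' string forms, assuming the package is imported under its `metrics` name.

```go
package main

import (
	"fmt"

	"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics"
)

func main() {
	// Both types implement fmt.Stringer, per the String methods in this file.
	fmt.Println(metrics.OriginProductDatadogExporter) // "datadog-exporter"
	fmt.Println(metrics.OriginSubProductOTLP)         // "otlp"
}
```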
diff --git a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/statspayload.go b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/statspayload.go
index 0ae556fadf..53b3f7aa13 100644
--- a/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/statspayload.go
+++ b/vendor/github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics/statspayload.go
@@ -15,468 +15,16 @@
package metrics
import (
- "context"
- "fmt"
- "strings"
-
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
- "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source"
- "github.com/DataDog/sketches-go/ddsketch"
- "github.com/DataDog/sketches-go/ddsketch/mapping"
- "github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
- "github.com/DataDog/sketches-go/ddsketch/store"
"github.com/golang/protobuf/proto"
- "go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
)
-// keyAPMStats specifies the key name of the resource attribute which identifies resource metrics
-// as being an APM Stats Payload. The presence of the key results in them being treated and consumed
-// differently by the Translator.
-const keyAPMStats = "_dd.apm_stats"
-
// keyStatsPayload is the key for the stats payload in the attributes map.
// This is used as Metric name and Attribute key.
const keyStatsPayload = "dd.internal.stats.payload"
-// This group of constants specifies the metric attribute keys used for APM Stats aggregation keys.
-const (
- statsKeyHostname = "dd.hostname"
- statsKeyEnv = "dd.env"
- statsKeyVersion = "dd.version"
- statsKeyLang = "dd.lang"
- statsKeyTracerVersion = "dd.tracer_version"
- statsKeyRuntimeID = "dd.runtime_id"
- statsKeySequence = "dd.sequence"
- statsKeyAgentAggregation = "dd.agent_aggregation"
- statsKeyService = "dd.service"
- statsKeyContainerID = "dd.container_id"
- statsKeyTags = "dd.tags"
- statsKeySynthetics = "dd.synthetics"
- statsKeySpanName = "dd.name"
- statsKeySpanResource = "dd.resource"
- statsKeyHTTPStatusCode = "dd.http_status_code"
- statsKeySpanType = "dd.type"
- statsKeySpanDBType = "dd.db_type"
-)
-
-// This group of constants specifies the metric names used to store APM Stats as metrics.
-const (
- metricNameHits = "dd.apm_stats.hits"
- metricNameErrors = "dd.apm_stats.errors"
- metricNameDuration = "dd.apm_stats.duration"
- metricNameTopLevelHits = "dd.apm_stats.top_level_hits"
- metricNameOkSummary = "dd.apm_stats.ok_summary"
- metricNameErrorSummary = "dd.apm_stats.error_summary"
-)
-
-// StatsPayloadToMetrics converts an APM Stats Payload to a set of OTLP Metrics.
-func (t *Translator) StatsPayloadToMetrics(sp *pb.StatsPayload) pmetric.Metrics {
- mmx := pmetric.NewMetrics()
- // We ignore Agent{Hostname,Env,Version} and fill those in later. We want those
- // values to be consistent with the ones that appear on traces and logs. They are
- // only known in the Datadog exporter or the Datadog Agent OTLP Ingest.
- var npayloads, nbuckets, ngroups int
- for _, cp := range sp.Stats {
- npayloads++
- rmx := mmx.ResourceMetrics().AppendEmpty()
- attr := rmx.Resource().Attributes()
- attr.PutBool(keyAPMStats, true)
- putStr(attr, statsKeyHostname, cp.Hostname)
- putStr(attr, statsKeyEnv, cp.Env)
- putStr(attr, statsKeyVersion, cp.Version)
- putStr(attr, statsKeyLang, cp.Lang)
- putStr(attr, statsKeyTracerVersion, cp.TracerVersion)
- putStr(attr, statsKeyRuntimeID, cp.RuntimeID)
- putInt(attr, statsKeySequence, int64(cp.Sequence))
- putStr(attr, statsKeyAgentAggregation, cp.AgentAggregation)
- putStr(attr, statsKeyService, cp.Service)
- putStr(attr, statsKeyContainerID, cp.ContainerID)
- putStr(attr, statsKeyTags, strings.Join(cp.Tags, ","))
-
- for _, sb := range cp.Stats {
- nbuckets++
- smx := rmx.ScopeMetrics().AppendEmpty()
- for _, cgs := range sb.Stats {
- ngroups++
- mxs := smx.Metrics()
- for name, val := range map[string]uint64{
- metricNameHits: cgs.Hits,
- metricNameErrors: cgs.Errors,
- metricNameDuration: cgs.Duration,
- metricNameTopLevelHits: cgs.TopLevelHits,
- } {
- appendSum(mxs, name, int64(val), sb.Start, sb.Start+sb.Duration, cgs)
- }
- if err := appendSketch(mxs, metricNameOkSummary, cgs.OkSummary, sb.Start, sb.Start+sb.Duration, cgs); err != nil {
- t.logger.Error("Error exporting APM Stats ok_summary", zap.Error(err))
- }
- if err := appendSketch(mxs, metricNameErrorSummary, cgs.ErrorSummary, sb.Start, sb.Start+sb.Duration, cgs); err != nil {
- t.logger.Error("Error exporting APM Stats error_summary", zap.Error(err))
- }
- }
- }
- }
- return mmx
-}
-
-// appendSum appends the value val as a sum with the given name to the metric slice. It uses the appropriate fields
-// from tags to set attributes.
-func appendSum(mslice pmetric.MetricSlice, name string, val int64, start, end uint64, tags *pb.ClientGroupedStats) {
- mx := mslice.AppendEmpty()
- mx.SetName(name)
- sum := mx.SetEmptySum()
- sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
- sum.SetIsMonotonic(true)
-
- dp := sum.DataPoints().AppendEmpty()
- dp.SetStartTimestamp(pcommon.Timestamp(start))
- dp.SetTimestamp(pcommon.Timestamp(end))
- dp.SetIntValue(val)
- putGroupedStatsAttr(dp.Attributes(), tags)
-}
-
-// appendSketch appends the proto-encoded DDSketch from sketchBytes to the given metric slice as an ExponentialHistogram
-// with the given name, start, and end timestamps. The fields from tags are set as attributes.
-// It is assumed that the DDSketch was created by the trace-agent as a "LogCollapsingLowestDenseDDSketch" with a relative
-// accuracy of 0.001.
-func appendSketch(mslice pmetric.MetricSlice, name string, sketchBytes []byte, start, end uint64, tags *pb.ClientGroupedStats) error {
- if sketchBytes == nil {
- // no error, just nothing to do
- return nil
- }
- var msg sketchpb.DDSketch
- if err := proto.Unmarshal(sketchBytes, &msg); err != nil {
- return err
- }
- dds, err := ddsketch.FromProto(&msg)
- if err != nil {
- return err
- }
- if dds.IsEmpty() {
- // no error, just nothing to do
- return nil
- }
- mx := mslice.AppendEmpty()
- mx.SetName(name)
- hist := mx.SetEmptyExponentialHistogram()
- hist.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
-
- dp := hist.DataPoints().AppendEmpty()
- dp.SetStartTimestamp(pcommon.Timestamp(start))
- dp.SetTimestamp(pcommon.Timestamp(end))
- putGroupedStatsAttr(dp.Attributes(), tags)
- dp.SetCount(uint64(dds.GetCount()))
- if max, err := dds.GetMaxValue(); err == nil {
- dp.SetMax(max)
- }
- if min, err := dds.GetMinValue(); err == nil {
- dp.SetMin(min)
- }
- dp.SetSum(dds.GetSum())
- dp.SetZeroCount(uint64(dds.GetZeroCount()))
- // Relative accuracy (ra) in the trace-agent is 0.001. See:
- // https://github.com/DataDog/datadog-agent/blob/198e1a/pkg/trace/stats/statsraw.go#L19-L21
- // Gamma is computed as (1+ra)/(1-ra), which results in 1.02020202. Another formula for gamma
- // is 2^2^-scale. Using that formula, we conclude that a scale of 5 should be suitable, which
- // is equal to a gamma of 1.0218971486541166.
- //
- // It will not be equally accurate, but the error margin is negligible. Generally, the
- // ExponentialHistogram is simply a data recipient being converted back to the original DDSketch,
- // at which point we know the original gamma and the resulting sketch will be equivalent. The
- // only scenario when the histogram could be used is if someone exports it using a non-Datadog
- // exporter.
- dp.SetScale(5)
- storeToBuckets(dds.GetNegativeValueStore(), dp.Negative())
- storeToBuckets(dds.GetPositiveValueStore(), dp.Positive())
- return nil
-}
-
-// storeToBuckets converts a DDSketch store to an ExponentialHistogram data point buckets.
-func storeToBuckets(s store.Store, b pmetric.ExponentialHistogramDataPointBuckets) {
- offset, err := s.MinIndex()
- if err != nil {
- return
- }
- max, err := s.MaxIndex()
- if err != nil {
- return
- }
- b.SetOffset(int32(offset))
- counts := make([]uint64, max-offset+1)
- s.ForEach(func(index int, count float64) bool {
- counts[index-offset] = uint64(count)
- return false
- })
- b.BucketCounts().FromRaw(counts)
-}
-
-func putGroupedStatsAttr(m pcommon.Map, cgs *pb.ClientGroupedStats) {
- putStr(m, statsKeyService, cgs.Service)
- putStr(m, statsKeySpanName, cgs.Name)
- putStr(m, statsKeySpanResource, cgs.Resource)
- putInt(m, statsKeyHTTPStatusCode, int64(cgs.HTTPStatusCode))
- putStr(m, statsKeySpanType, cgs.Type)
- putStr(m, statsKeySpanDBType, cgs.DBType)
- if cgs.Synthetics {
- m.PutBool(statsKeySynthetics, true)
- }
-}
-
-func putStr(m pcommon.Map, k, v string) {
- if v == "" {
- return
- }
- m.PutStr(k, v)
-}
-
-func putInt(m pcommon.Map, k string, v int64) {
- if v == 0 {
- return
- }
- m.PutInt(k, v)
-}
-
-// aggregationKey specifies a set of values by which a certain aggregationMetric is grouped.
-type aggregationKey struct {
- Service string
- Name string
- Resource string
- HTTPStatusCode uint32
- Type string
- DBType string
- Synthetics bool
-}
-
-// aggregationValue specifies the set of metrics corresponding to a certain aggregationKey.
-type aggregationValue struct {
- Hits uint64
- Errors uint64
- Duration uint64
- OkSummary []byte
- ErrorSummary []byte
- TopLevelHits uint64
-}
-
-// aggregations stores aggregation values (stats) grouped by their corresponding keys.
-type aggregations struct {
- agg map[aggregationKey]*aggregationValue
-}
-
-// Value returns the aggregation value corresponding to the key found in map m.
-func (a *aggregations) Value(m pcommon.Map) *aggregationValue {
- var sntx bool
- if v, ok := m.Get(statsKeySynthetics); ok {
- sntx = v.Bool()
- }
- key := aggregationKey{
- Service: getStr(m, statsKeyService),
- Name: getStr(m, statsKeySpanName),
- Resource: getStr(m, statsKeySpanResource),
- HTTPStatusCode: uint32(getInt(m, statsKeyHTTPStatusCode)),
- Type: getStr(m, statsKeySpanType),
- DBType: getStr(m, statsKeySpanDBType),
- Synthetics: sntx,
- }
- if a.agg == nil {
- a.agg = make(map[aggregationKey]*aggregationValue)
- }
- if _, ok := a.agg[key]; !ok {
- a.agg[key] = new(aggregationValue)
- }
- return a.agg[key]
-}
-
-// Stats returns the set of pb.ClientGroupedStats based on all the aggregated key/value
-// pairs.
-func (a *aggregations) Stats() []*pb.ClientGroupedStats {
- cgs := make([]*pb.ClientGroupedStats, 0, len(a.agg))
- for k, v := range a.agg {
- cgs = append(cgs, &pb.ClientGroupedStats{
- Service: k.Service,
- Name: k.Name,
- Resource: k.Resource,
- HTTPStatusCode: k.HTTPStatusCode,
- Type: k.Type,
- DBType: k.DBType,
- Synthetics: k.Synthetics,
- Hits: v.Hits,
- Errors: v.Errors,
- Duration: v.Duration,
- OkSummary: v.OkSummary,
- ErrorSummary: v.ErrorSummary,
- TopLevelHits: v.TopLevelHits,
- })
- }
- return cgs
-}
-
-// UnsetHostnamePlaceholder is the string used as a hostname when the hostname can not be extracted from span attributes
-// by the processor. Upon decoding the metrics, the Translator will use its configured fallback SourceProvider to replace
-// it with the correct hostname.
-//
-// This isn't the most ideal approach to the problem, but provides the better user experience by avoiding the need to
-// duplicate the "exporter::datadog::hostname" configuration field as "processor::datadog::hostname". The hostname can
-// also not be left empty in case of failure to obtain it, because empty has special meaning. An empty hostname means
-// that we are in a Lambda environment. Thus, we must use a placeholder.
-const UnsetHostnamePlaceholder = "__unset__"
-
-// statsPayloadFromMetrics converts Resource Metrics to an APM Client Stats Payload.
-func (t *Translator) statsPayloadFromMetrics(rmx pmetric.ResourceMetrics) (*pb.ClientStatsPayload, error) {
- attr := rmx.Resource().Attributes()
- if v, ok := attr.Get(keyAPMStats); !ok || !v.Bool() {
- return &pb.ClientStatsPayload{}, fmt.Errorf("was asked to convert metrics to stats payload, but identifier key %q was not present. Skipping.", keyAPMStats)
- }
- hostname := getStr(attr, statsKeyHostname)
- tags := strings.Split(getStr(attr, statsKeyTags), ",")
- if hostname == UnsetHostnamePlaceholder {
- src, err := t.source(context.Background(), rmx.Resource())
- if err != nil {
- return &pb.ClientStatsPayload{}, err
- }
- switch src.Kind {
- case source.HostnameKind:
- hostname = src.Identifier
- case source.AWSECSFargateKind:
- hostname = ""
- tags = append(tags, src.Tag())
- }
- }
- cp := &pb.ClientStatsPayload{
- Hostname: hostname,
- Env: getStr(attr, statsKeyEnv),
- Version: getStr(attr, statsKeyVersion),
- Lang: getStr(attr, statsKeyLang),
- TracerVersion: getStr(attr, statsKeyTracerVersion),
- RuntimeID: getStr(attr, statsKeyRuntimeID),
- Sequence: getInt(attr, statsKeySequence),
- AgentAggregation: getStr(attr, statsKeyAgentAggregation),
- Service: getStr(attr, statsKeyService),
- ContainerID: getStr(attr, statsKeyContainerID),
- Tags: tags,
- }
- smxs := rmx.ScopeMetrics()
- for j := 0; j < smxs.Len(); j++ {
- mxs := smxs.At(j).Metrics()
- var (
- buck pb.ClientStatsBucket
- agg aggregations
- )
- for k := 0; k < mxs.Len(); k++ {
- m := mxs.At(k)
- switch m.Type() {
- case pmetric.MetricTypeSum:
- key, val := t.extractSum(m.Sum(), &buck)
- switch m.Name() {
- case metricNameHits:
- agg.Value(key).Hits = val
- case metricNameErrors:
- agg.Value(key).Errors = val
- case metricNameDuration:
- agg.Value(key).Duration = val
- case metricNameTopLevelHits:
- agg.Value(key).TopLevelHits = val
- }
- case pmetric.MetricTypeExponentialHistogram:
- key, val := t.extractSketch(m.ExponentialHistogram(), &buck)
- switch m.Name() {
- case metricNameOkSummary:
- agg.Value(key).OkSummary = val
- case metricNameErrorSummary:
- agg.Value(key).ErrorSummary = val
- }
- default:
- return &pb.ClientStatsPayload{}, fmt.Errorf(`metric named %q in Stats Payload should be of type "Sum" or "ExponentialHistogram" but is %q instead`, m.Name(), m.Type())
- }
- }
- buck.Stats = agg.Stats()
- cp.Stats = append(cp.Stats, &buck)
- }
- return cp, nil
-}
-
-// extractSketch extracts a proto-encoded version of the DDSketch found in the first data point of the given
-// ExponentialHistogram along with its attributes and updates the timestamps in the provided stats bucket.
-func (t *Translator) extractSketch(eh pmetric.ExponentialHistogram, buck *pb.ClientStatsBucket) (pcommon.Map, []byte) {
- dps := eh.DataPoints()
- if dps.Len() == 0 {
- t.logger.Debug("Stats payload exponential histogram with no data points.")
- return pcommon.NewMap(), nil
- }
- if dps.Len() > 1 {
- t.logger.Debug("Stats payload metrics should not have more than one data point. This could be an error.")
- }
- dp := dps.At(0)
- t.recordStatsBucketTimestamp(buck, dp.StartTimestamp(), dp.Timestamp())
- positive := toStore(dp.Positive())
- negative := toStore(dp.Negative())
- // use relative accuracy 0.01; same as pkg/trace/stats/statsraw.go
- index, err := mapping.NewLogarithmicMapping(0.01)
- if err != nil {
- t.logger.Debug("Error creating LogarithmicMapping.", zap.Error(err))
- return dp.Attributes(), nil
- }
- sketch := ddsketch.NewDDSketch(index, positive, negative)
- if addErr := sketch.AddWithCount(0, float64(dp.ZeroCount())); addErr != nil {
- t.logger.Debug("Error adding zero counts.", zap.Error(addErr))
- return dp.Attributes(), nil
- }
- pb := sketch.ToProto()
- b, err := proto.Marshal(pb)
- if err != nil {
- t.logger.Debug("Error marshalling stats payload sketch into proto.", zap.Error(err))
- return dp.Attributes(), nil
- }
- return dp.Attributes(), b
-}
-
-// extractSum extracts the attributes and the integer value found in the first data point of the given sum
-// and updates the given buckets timestamps.
-func (t *Translator) extractSum(sum pmetric.Sum, buck *pb.ClientStatsBucket) (pcommon.Map, uint64) {
- dps := sum.DataPoints()
- if dps.Len() == 0 {
- t.logger.Debug("APM stats payload sum with no data points.")
- return pcommon.NewMap(), 0
- }
- if dps.Len() > 1 {
- t.logger.Debug("APM stats metrics should not have more than one data point. This could be an error.")
- }
- dp := dps.At(0)
- t.recordStatsBucketTimestamp(buck, dp.StartTimestamp(), dp.Timestamp())
- return dp.Attributes(), uint64(dp.IntValue()) // more than one makes no sense
-}
-
-// recordStatsBucketTimestamp records the start & end timestamps from the given data point into the given stats bucket.
-func (t *Translator) recordStatsBucketTimestamp(buck *pb.ClientStatsBucket, startt, endt pcommon.Timestamp) {
- start := uint64(startt)
- if buck.Start != 0 && buck.Start != start {
- t.logger.Debug("APM stats data point start timestamp did not match bucket. This could be an error.")
- }
- buck.Start = start
- duration := uint64(endt) - uint64(startt)
- buck.Duration = duration
- if buck.Duration != 0 && buck.Duration != duration {
- t.logger.Debug("APM Stats data point duration did not match bucket. This could be an error.")
- }
-}
-
-func getStr(m pcommon.Map, k string) string {
- v, ok := m.Get(k)
- if !ok {
- return ""
- }
- return v.Str()
-}
-
-func getInt(m pcommon.Map, k string) uint64 {
- v, ok := m.Get(k)
- if !ok {
- return 0
- }
- return uint64(v.Int())
-}
-
// StatsToMetrics converts a StatsPayload to a pdata.Metrics
func (t *Translator) StatsToMetrics(sp *pb.StatsPayload) (pmetric.Metrics, error) {
bytes, err := proto.Marshal(sp)
diff --git a/vendor/github.com/DataDog/viper/.gitignore b/vendor/github.com/DataDog/viper/.gitignore
new file mode 100644
index 0000000000..01b5c44b9c
--- /dev/null
+++ b/vendor/github.com/DataDog/viper/.gitignore
@@ -0,0 +1,29 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.bench
+
+.vscode
+
+# exclude dependencies in the `/vendor` folder
+vendor
diff --git a/vendor/github.com/DataDog/viper/.osfm.yaml b/vendor/github.com/DataDog/viper/.osfm.yaml
new file mode 100644
index 0000000000..4e7d246aec
--- /dev/null
+++ b/vendor/github.com/DataDog/viper/.osfm.yaml
@@ -0,0 +1,9 @@
+# Open Source Fork Monitor allowlist file. This designates known OSF repos along with some attributes for calculating recency/staleness
+# Generated by Datadog Workflow Automation:
+# https://app.datadoghq.com/workflow/d284505f-a5b7-4908-bf0e-88153f38f93d
+
+# See this Confluence for more detail:
+# https://datadoghq.atlassian.net/wiki/spaces/ISEC/pages/3093168822/OSFM+-+Open+Source+Fork+Monitor
+
+expiration: null # YYYY-MM-DD (leave null if the allowance should never expire)
+type: null # e.g.: "CI/CD", "EXTERNAL", "INTERNAL", "AGENT", etc. (leave null if unsure)
\ No newline at end of file
diff --git a/vendor/github.com/DataDog/viper/LICENSE b/vendor/github.com/DataDog/viper/LICENSE
new file mode 100644
index 0000000000..4527efb9c0
--- /dev/null
+++ b/vendor/github.com/DataDog/viper/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/DataDog/viper/Makefile b/vendor/github.com/DataDog/viper/Makefile
new file mode 100644
index 0000000000..b0f9acf249
--- /dev/null
+++ b/vendor/github.com/DataDog/viper/Makefile
@@ -0,0 +1,76 @@
+# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+
+OS = $(shell uname | tr A-Z a-z)
+export PATH := $(abspath bin/):${PATH}
+
+# Build variables
+BUILD_DIR ?= build
+export CGO_ENABLED ?= 0
+export GOOS = $(shell go env GOOS)
+ifeq (${VERBOSE}, 1)
+ifeq ($(filter -v,${GOARGS}),)
+ GOARGS += -v
+endif
+TEST_FORMAT = short-verbose
+endif
+
+# Dependency versions
+GOTESTSUM_VERSION = 1.6.4
+GOLANGCI_VERSION = 1.40.1
+
+# Add the ability to override some variables
+# Use with care
+-include override.mk
+
+.PHONY: clear
+clear: ## Clear the working area and the project
+ rm -rf bin/
+
+.PHONY: check
+check: test lint ## Run tests and linters
+
+bin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION}
+ @ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum
+bin/gotestsum-${GOTESTSUM_VERSION}:
+ @mkdir -p bin
+ curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION}
+
+TEST_PKGS ?= ./...
+.PHONY: test
+test: TEST_FORMAT ?= short
+test: SHELL = /bin/bash
+test: export CGO_ENABLED=1
+test: bin/gotestsum ## Run tests
+ @mkdir -p ${BUILD_DIR}
+ bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...)
+
+bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION}
+ @ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint
+bin/golangci-lint-${GOLANGCI_VERSION}:
+ @mkdir -p bin
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION}
+ @mv bin/golangci-lint "$@"
+
+.PHONY: lint
+lint: bin/golangci-lint ## Run linter
+ bin/golangci-lint run
+
+.PHONY: fix
+fix: bin/golangci-lint ## Fix lint violations
+ bin/golangci-lint run --fix
+
+# Add custom targets here
+-include custom.mk
+
+.PHONY: list
+list: ## List all make targets
+ @${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort
+
+.PHONY: help
+.DEFAULT_GOAL := help
+help:
+ @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+# Variable outputting/exporting rules
+var-%: ; @echo $($*)
+varexport-%: ; @echo $*=$($*)
diff --git a/vendor/github.com/DataDog/viper/README.md b/vendor/github.com/DataDog/viper/README.md
new file mode 100644
index 0000000000..0208eac84d
--- /dev/null
+++ b/vendor/github.com/DataDog/viper/README.md
@@ -0,0 +1,691 @@
+![viper logo](https://cloud.githubusercontent.com/assets/173412/10886745/998df88a-8151-11e5-9448-4736db51020d.png)
+
+Go configuration with fangs!
+
+Many Go projects are built using Viper, including:
+
+* [Hugo](http://gohugo.io)
+* [EMC RexRay](http://rexray.readthedocs.org/en/stable/)
+* [Imgur’s Incus](https://github.com/Imgur/incus)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [Docker Notary](https://github.com/docker/Notary)
+* [BloomApi](https://www.bloomapi.com/)
+* [doctl](https://github.com/digitalocean/doctl)
+* [Clairctl](https://github.com/jgsqware/clairctl)
+
+[![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper)
+
+
+## What is Viper?
+
+Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed
+to work within an application, and can handle all types of configuration needs
+and formats. It supports:
+
+* setting defaults
+* reading from JSON, TOML, YAML, HCL, and Java properties config files
+* live watching and re-reading of config files (optional)
+* reading from environment variables
+* reading from remote config systems (etcd or Consul), and watching changes
+* reading from command line flags
+* reading from buffer
+* setting explicit values
+
+Viper can be thought of as a registry for all of your application's
+configuration needs.
+
+## Why Viper?
+
+When building a modern application, you don’t want to worry about
+configuration file formats; you want to focus on building awesome software.
+Viper is here to help with that.
+
+Viper does the following for you:
+
+1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, or Java properties formats.
+2. Provide a mechanism to set default values for your different
+ configuration options.
+3. Provide a mechanism to set override values for options specified through
+ command line flags.
+4. Provide an alias system to easily rename parameters without breaking existing
+ code.
+5. Make it easy to tell the difference between when a user has provided a
+   command line flag or config file value which is the same as the default.
+
+Viper uses the following precedence order. Each item takes precedence over the
+item below it:
+
+ * explicit call to Set
+ * flag
+ * env
+ * config
+ * key/value store
+ * default
+
+Viper configuration keys are case insensitive.
+
+## Putting Values into Viper
+
+### Establishing Defaults
+
+A good configuration system will support default values. A default value is not
+required for a key, but it’s useful in the event that a key hasn’t been set via
+config file, environment variable, remote configuration or flag.
+
+Examples:
+
+```go
+viper.SetDefault("ContentDir", "content")
+viper.SetDefault("LayoutDir", "layouts")
+viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"})
+```
+
+### Reading Config Files
+
+Viper requires minimal configuration so it knows where to look for config files.
+Viper supports JSON, TOML, YAML, HCL, and Java Properties files. Viper can search multiple paths, but
+currently a single Viper instance only supports a single configuration file.
+Viper does not default to any configuration search paths, leaving that
+decision to the application.
+
+Here is an example of how to use Viper to search for and read a configuration file.
+None of the specific paths are required, but at least one path should be provided
+where a configuration file is expected.
+
+```go
+viper.SetConfigName("config") // name of config file (without extension)
+viper.AddConfigPath("/etc/appname/") // path to look for the config file in
+viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths
+viper.AddConfigPath(".") // optionally look for config in the working directory
+err := viper.ReadInConfig() // Find and read the config file
+if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %s \n", err))
+}
+```
+
+### Watching and re-reading config files
+
+Viper supports the ability to have your application live read a config file while running.
+
+Gone are the days of needing to restart a server to have a config take effect;
+viper-powered applications can read an update to a config file while running and
+not miss a beat.
+
+Simply tell the viper instance to `WatchConfig()`.
+Optionally you can provide a function for Viper to run each time a change occurs.
+
+**Make sure you add all of the configPaths prior to calling `WatchConfig()`**
+
+```go
+viper.WatchConfig()
+viper.OnConfigChange(func(e fsnotify.Event) {
+ fmt.Println("Config file changed:", e.Name)
+})
+```
+
+### Reading Config from io.Reader
+
+Viper predefines many configuration sources such as files, environment
+variables, flags, and remote K/V store, but you are not bound to them. You can
+also implement your own required configuration source and feed it to viper.
+
+```go
+viper.SetConfigType("yaml") // or viper.SetConfigType("YAML")
+
+// any approach to require this configuration into your program.
+var yamlExample = []byte(`
+Hacker: true
+name: steve
+hobbies:
+- skateboarding
+- snowboarding
+- go
+clothing:
+ jacket: leather
+ trousers: denim
+age: 35
+eyes : brown
+beard: true
+`)
+
+viper.ReadConfig(bytes.NewBuffer(yamlExample))
+
+viper.Get("name") // this would be "steve"
+```
+
+### Setting Overrides
+
+These could be from a command line flag, or from your own application logic.
+
+```go
+viper.Set("Verbose", true)
+viper.Set("LogFile", LogFile)
+```
+
+### Registering and Using Aliases
+
+Aliases permit a single value to be referenced by multiple keys.
+
+```go
+viper.RegisterAlias("loud", "Verbose")
+
+viper.Set("verbose", true) // same result as next line
+viper.Set("loud", true) // same result as prior line
+
+viper.GetBool("loud") // true
+viper.GetBool("verbose") // true
+```
+
+### Working with Environment Variables
+
+Viper has full support for environment variables. This enables 12 factor
+applications out of the box. There are five methods that exist to aid working
+with ENV:
+
+ * `AutomaticEnv()`
+ * `BindEnv(string...) : error`
+ * `SetEnvPrefix(string)`
+ * `SetEnvKeyReplacer(string...) *strings.Replacer`
+ * `AllowEmptyEnv(bool)`
+
+_When working with ENV variables, it’s important to recognize that Viper
+treats ENV variables as case sensitive._
+
+Viper provides a mechanism to try to ensure that ENV variables are unique. By
+using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from
+the environment variables. Both `BindEnv` and `AutomaticEnv` will use this
+prefix.
+
+`BindEnv` takes one or two parameters. The first parameter is the key name, the
+second is the name of the environment variable. The name of the environment
+variable is case sensitive. If the ENV variable name is not provided, then
+Viper will automatically assume that the key name matches the ENV variable name,
+but the ENV variable is IN ALL CAPS. When you explicitly provide the ENV
+variable name, it **does not** automatically add the prefix.
+
+One important thing to recognize when working with ENV variables is that the
+value will be read each time it is accessed. Viper does not fix the value when
+`BindEnv` is called.
+
+`AutomaticEnv` is a powerful helper especially when combined with
+`SetEnvPrefix`. When called, Viper will check for an environment variable any
+time a `viper.Get` request is made. It will apply the following rules. It will
+check for an environment variable with a name matching the key uppercased and
+prefixed with the `EnvPrefix` if set.
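+
+A minimal sketch of `AutomaticEnv` (the prefix and key are illustrative):
+
+```go
+viper.SetEnvPrefix("spf")
+viper.AutomaticEnv()
+
+os.Setenv("SPF_PORT", "8080") // typically done outside of the app
+
+viper.GetInt("port") // 8080, resolved from SPF_PORT without an explicit BindEnv
+```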
+
+`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env
+keys to an extent. This is useful if you want to use `-` or something in your
+`Get()` calls, but want your environment variables to use `_` delimiters. An
+example of using it can be found in `viper_test.go`.
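+
+A minimal sketch, assuming an illustrative key containing a dash:
+
+```go
+viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+viper.SetEnvPrefix("spf")
+viper.BindEnv("log-level")
+
+os.Setenv("SPF_LOG_LEVEL", "debug") // typically done outside of the app
+
+viper.GetString("log-level") // "debug"
+```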
+
+By default empty environment variables are considered unset and will fall back to
+the next configuration source. To treat empty environment variables as set, use
+the `AllowEmptyEnv` method.
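+
+For example (a small sketch, assuming no `EnvPrefix` is set; the key name is
+illustrative):
+
+```go
+viper.SetDefault("socket", "/tmp/app.sock")
+viper.BindEnv("socket")
+viper.AllowEmptyEnv(true)
+
+os.Setenv("SOCKET", "") // set, but empty
+
+viper.GetString("socket") // "" instead of the default "/tmp/app.sock"
+```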
+
+#### Env example
+
+```go
+SetEnvPrefix("spf") // will be uppercased automatically
+BindEnv("id")
+
+os.Setenv("SPF_ID", "13") // typically done outside of the app
+
+id := Get("id") // 13
+```
+
+### Working with Flags
+
+Viper has the ability to bind to flags. Specifically, Viper supports `Pflags`
+as used in the [Cobra](https://github.com/spf13/cobra) library.
+
+Like `BindEnv`, the value is not set when the binding method is called, but when
+it is accessed. This means you can bind as early as you want, even in an
+`init()` function.
+
+For individual flags, the `BindPFlag()` method provides this functionality.
+
+Example:
+
+```go
+serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+```
+
+You can also bind an existing set of pflags (pflag.FlagSet):
+
+Example:
+
+```go
+pflag.Int("flagname", 1234, "help message for flagname")
+
+pflag.Parse()
+viper.BindPFlags(pflag.CommandLine)
+
+i := viper.GetInt("flagname") // retrieve values from viper instead of pflag
+```
+
+The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
+the use of other packages that use the [flag](https://golang.org/pkg/flag/)
+package from the standard library. The pflag package can handle the flags
+defined for the flag package by importing these flags. This is accomplished
+by calling a convenience function provided by the pflag package called
+AddGoFlagSet().
+
+Example:
+
+```go
+package main
+
+import (
+ "flag"
+
+ "github.com/spf13/pflag"
+ "github.com/spf13/viper"
+)
+
+func main() {
+
+ // using standard library "flag" package
+ flag.Int("flagname", 1234, "help message for flagname")
+
+ pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+ pflag.Parse()
+ viper.BindPFlags(pflag.CommandLine)
+
+ i := viper.GetInt("flagname") // retrieve value from viper
+
+ ...
+}
+```
+
+#### Flag interfaces
+
+Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`.
+
+`FlagValue` represents a single flag. This is a very simple example of how to implement this interface:
+
+```go
+type myFlag struct {}
+func (f myFlag) HasChanged() bool { return false }
+func (f myFlag) Name() string { return "my-flag-name" }
+func (f myFlag) ValueString() string { return "my-flag-value" }
+func (f myFlag) ValueType() string { return "string" }
+```
+
+Once your flag implements this interface, you can simply tell Viper to bind it:
+
+```go
+viper.BindFlagValue("my-flag-name", myFlag{})
+```
+
+`FlagValueSet` represents a group of flags. This is a very simple example of how to implement this interface:
+
+```go
+type myFlagSet struct {
+ flags []myFlag
+}
+
+func (f myFlagSet) VisitAll(fn func(FlagValue)) {
+ for _, flag := range f.flags {
+ fn(flag)
+ }
+}
+```
+
+Once your flag set implements this interface, you can simply tell Viper to bind it:
+
+```go
+fSet := myFlagSet{
+ flags: []myFlag{myFlag{}, myFlag{}},
+}
+viper.BindFlagValues("my-flags", fSet)
+```
+
+### Remote Key/Value Store Support
+
+To enable remote support in Viper, do a blank import of the `viper/remote`
+package:
+
+`import _ "github.com/spf13/viper/remote"`
+
+Viper will read a config string (as JSON, TOML, YAML or HCL) retrieved from a path
+in a Key/Value store such as etcd or Consul. These values take precedence over
+default values, but are overridden by configuration values retrieved from disk,
+flags, or environment variables.
+
+Viper uses [crypt](https://github.com/xordataexchange/crypt) to retrieve
+configuration from the K/V store, which means that you can store your
+configuration values encrypted and have them automatically decrypted if you have
+the correct gpg keyring. Encryption is optional.
+
+You can use remote configuration in conjunction with local configuration, or
+independently of it.
+
+`crypt` has a command-line helper that you can use to put configurations in your
+K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
+
+```bash
+$ go get github.com/xordataexchange/crypt/bin/crypt
+$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json
+```
+
+Confirm that your value was set:
+
+```bash
+$ crypt get -plaintext /config/hugo.json
+```
+
+See the `crypt` documentation for examples of how to set encrypted values, or
+how to use Consul.
+
+### Remote Key/Value Store Example - Unencrypted
+
+#### etcd
+```go
+viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
+err := viper.ReadRemoteConfig()
+```
+
+#### Consul
+You need to set a key in Consul key/value storage with a JSON value containing your desired config.
+For example, create a Consul key/value store key `MY_CONSUL_KEY` with value:
+
+```json
+{
+ "port": 8080,
+ "hostname": "myhostname.com"
+}
+```
+
+```go
+viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY")
+viper.SetConfigType("json") // Need to explicitly set this to json
+err := viper.ReadRemoteConfig()
+
+fmt.Println(viper.Get("port")) // 8080
+fmt.Println(viper.Get("hostname")) // myhostname.com
+```
+
+### Remote Key/Value Store Example - Encrypted
+
+```go
+viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
+err := viper.ReadRemoteConfig()
+```
+
+### Watching Changes in etcd - Unencrypted
+
+```go
+// alternatively, you can create a new viper instance.
+var runtime_viper = viper.New()
+
+runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml")
+runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
+
+// read from remote config the first time.
+err := runtime_viper.ReadRemoteConfig()
+
+// unmarshal config
+runtime_viper.Unmarshal(&runtime_conf)
+
+// open a goroutine to watch remote changes forever
+go func(){
+ for {
+ time.Sleep(time.Second * 5) // delay after each request
+
+ // currently, only tested with etcd support
+ err := runtime_viper.WatchRemoteConfig()
+ if err != nil {
+ log.Errorf("unable to read remote config: %v", err)
+ continue
+ }
+
+ // unmarshal new config into our runtime config struct. you can also use channel
+ // to implement a signal to notify the system of the changes
+ runtime_viper.Unmarshal(&runtime_conf)
+ }
+}()
+```
+
+## Getting Values From Viper
+
+In Viper, there are a few ways to get a value depending on the value’s type.
+The following functions and methods exist:
+
+ * `Get(key string) : interface{}`
+ * `GetBool(key string) : bool`
+ * `GetFloat64(key string) : float64`
+ * `GetInt(key string) : int`
+ * `GetString(key string) : string`
+ * `GetStringMap(key string) : map[string]interface{}`
+ * `GetStringMapString(key string) : map[string]string`
+ * `GetStringSlice(key string) : []string`
+ * `GetTime(key string) : time.Time`
+ * `GetDuration(key string) : time.Duration`
+ * `IsSet(key string) : bool`
+ * `AllSettings() : map[string]interface{}`
+
+One important thing to recognize is that each Get function will return a zero
+value if it’s not found. To check if a given key exists, the `IsSet()` method
+has been provided.
+
+Example:
+```go
+viper.GetString("logfile") // case-insensitive Setting & Getting
+if viper.GetBool("verbose") {
+ fmt.Println("verbose enabled")
+}
+```
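+
+To distinguish “not set” from a genuine zero value, check `IsSet()` first (a
+small illustrative sketch):
+
+```go
+if !viper.IsSet("logfile") {
+ fmt.Println("no logfile configured")
+}
+```
+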
+### Accessing nested keys
+
+The accessor methods also accept formatted paths to deeply nested keys. For
+example, if the following JSON file is loaded:
+
+```json
+{
+ "host": {
+ "address": "localhost",
+ "port": 5799
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+
+```
+
+Viper can access a nested field by passing a `.` delimited path of keys:
+
+```go
+GetString("datastore.metric.host") // (returns "127.0.0.1")
+```
+
+This obeys the precedence rules established above; the search for the path
+will cascade through the remaining configuration registries until found.
+
+For example, given this configuration file, both `datastore.metric.host` and
+`datastore.metric.port` are already defined (and may be overridden). If in addition
+`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
+
+However, if `datastore.metric` was overridden (by a flag, an environment variable,
+the `Set()` method, …) with an immediate value, then all sub-keys of
+`datastore.metric` become undefined; they are “shadowed” by the higher-priority
+configuration level.
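+
+For instance (an illustrative sketch building on the configuration above):
+
+```go
+viper.Set("datastore.metric", "redis://localhost:6379")
+
+viper.Get("datastore.metric.host") // nil: shadowed by the higher-priority Set()
+```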
+
+Lastly, if there exists a key that matches the delimited key path, its value
+will be returned instead. E.g.
+
+```json
+{
+ "datastore.metric.host": "0.0.0.0",
+ "host": {
+ "address": "localhost",
+ "port": 5799
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+```
+
+```go
+GetString("datastore.metric.host") // returns "0.0.0.0"
+```
+
+### Extract sub-tree
+
+Extract sub-tree from Viper.
+
+For example, `viper` represents:
+
+```yaml
+app:
+ cache1:
+ max-items: 100
+ item-size: 64
+ cache2:
+ max-items: 200
+ item-size: 80
+```
+
+After executing:
+
+```go
+subv := viper.Sub("app.cache1")
+```
+
+`subv` represents:
+
+```yaml
+max-items: 100
+item-size: 64
+```
+
+Suppose we have:
+
+```go
+func NewCache(cfg *Viper) *Cache {...}
+```
+
+which creates a cache based on config information formatted as `subv`.
+Now it’s easy to create these 2 caches separately as:
+
+```go
+cfg1 := viper.Sub("app.cache1")
+cache1 := NewCache(cfg1)
+
+cfg2 := viper.Sub("app.cache2")
+cache2 := NewCache(cfg2)
+```
+
+### Unmarshaling
+
+You also have the option of Unmarshaling all or a specific value to a struct, map,
+etc.
+
+There are two methods to do this:
+
+ * `Unmarshal(rawVal interface{}) : error`
+ * `UnmarshalKey(key string, rawVal interface{}) : error`
+
+Example:
+
+```go
+type config struct {
+ Port int
+ Name string
+ PathMap string `mapstructure:"path_map"`
+}
+
+var C config
+
+err := Unmarshal(&C)
+if err != nil {
+ t.Fatalf("unable to decode into struct, %v", err)
+}
+```
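+
+`UnmarshalKey` works the same way but decodes only the sub-tree under the given
+key (a small sketch; the `server` key is an illustrative placeholder):
+
+```go
+var C config
+
+err := viper.UnmarshalKey("server", &C)
+if err != nil {
+ log.Fatalf("unable to decode into struct, %v", err)
+}
+```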
+
+### Marshalling to string
+
+You may need to marshal all the settings held in viper into a string rather than write them to a file.
+You can use your favorite format's marshaller with the config returned by `AllSettings()`.
+
+```go
+import (
+ "log"
+
+ yaml "gopkg.in/yaml.v2"
+ // ...
+)
+
+func yamlStringSettings() string {
+ c := viper.AllSettings()
+ bs, err := yaml.Marshal(c)
+ if err != nil {
+ t.Fatalf("unable to marshal config to YAML: %v", err)
+ }
+ return string(bs)
+}
+```
+
+## Viper or Vipers?
+
+Viper comes ready to use out of the box. There is no configuration or
+initialization needed to begin using Viper. Since most applications will want
+to use a single central repository for their configuration, the viper package
+provides this. It is similar to a singleton.
+
+All of the examples above demonstrate using viper in its singleton style.
+
+### Working with multiple vipers
+
+You can also create many different vipers for use in your application. Each will
+have its own unique set of configurations and values. Each can read from a
+different config file, key value store, etc. All of the functions that the viper
+package supports are mirrored as methods on a Viper instance.
+
+Example:
+
+```go
+x := viper.New()
+y := viper.New()
+
+x.SetDefault("ContentDir", "content")
+y.SetDefault("ContentDir", "foobar")
+
+//...
+```
+
+When working with multiple vipers, it is up to the user to keep track of the
+different vipers.
+
+## Q & A
+
+Q: Why not INI files?
+
+A: INI files are pretty awful. There’s no standard format, and they are hard to
+validate. Viper is designed to work with JSON, TOML or YAML files. If someone
+really wants to add this feature, I’d be happy to merge it. It’s easy to specify
+which formats your application will permit.
+
+Q: Why is it called “Viper”?
+
+A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
+to [Cobra](https://github.com/spf13/cobra). While both can operate completely
+independently, together they make a powerful pair to handle much of your
+application foundation needs.
+
+Q: Why is it called “Cobra”?
+
+A: Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
diff --git a/vendor/github.com/DataDog/viper/flags.go b/vendor/github.com/DataDog/viper/flags.go
new file mode 100644
index 0000000000..dd32f4e1c2
--- /dev/null
+++ b/vendor/github.com/DataDog/viper/flags.go
@@ -0,0 +1,57 @@
+package viper
+
+import "github.com/spf13/pflag"
+
+// FlagValueSet is an interface that users can implement
+// to bind a set of flags to viper.
+type FlagValueSet interface {
+ VisitAll(fn func(FlagValue))
+}
+
+// FlagValue is an interface that users can implement
+// to bind different flags to viper.
+type FlagValue interface {
+ HasChanged() bool
+ Name() string
+ ValueString() string
+ ValueType() string
+}
+
+// pflagValueSet is a wrapper around *pflag.FlagSet
+// that implements FlagValueSet.
+type pflagValueSet struct {
+ flags *pflag.FlagSet
+}
+
+// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
+func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
+ p.flags.VisitAll(func(flag *pflag.Flag) {
+ fn(pflagValue{flag})
+ })
+}
+
+// pflagValue is a wrapper around *pflag.Flag
+// that implements FlagValue.
+type pflagValue struct {
+ flag *pflag.Flag
+}
+
+// HasChanged returns whether the flag has changed or not.
+func (p pflagValue) HasChanged() bool {
+ return p.flag.Changed
+}
+
+// Name returns the name of the flag.
+func (p pflagValue) Name() string {
+ return p.flag.Name
+}
+
+// ValueString returns the value of the flag as a string.
+func (p pflagValue) ValueString() string {
+ return p.flag.Value.String()
+}
+
+// ValueType returns the type of the flag as a string.
+func (p pflagValue) ValueType() string {
+ return p.flag.Value.Type()
+}
diff --git a/vendor/github.com/DataDog/viper/util.go b/vendor/github.com/DataDog/viper/util.go
new file mode 100644
index 0000000000..1da18271b1
--- /dev/null
+++ b/vendor/github.com/DataDog/viper/util.go
@@ -0,0 +1,227 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+package viper
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "github.com/spf13/afero"
+ "github.com/spf13/cast"
+ jww "github.com/spf13/jwalterweatherman"
+)
+
+// ConfigParseError denotes failing to parse configuration file.
+type ConfigParseError struct {
+ err error
+}
+
+// Error returns the formatted configuration error.
+func (pe ConfigParseError) Error() string {
+ return fmt.Sprintf("While parsing config: %s", pe.err.Error())
+}
+
+// toCaseInsensitiveValue checks if the value is a map;
+// if so, it creates a copy and lower-cases the keys recursively.
+func toCaseInsensitiveValue(value interface{}) interface{} {
+ switch v := value.(type) {
+ case map[interface{}]interface{}:
+ value = copyAndInsensitiviseMap(cast.ToStringMap(v))
+ case map[string]interface{}:
+ value = copyAndInsensitiviseMap(v)
+ }
+
+ return value
+}
+
+// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of
+// any map it makes case insensitive.
+func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} {
+ nm := make(map[string]interface{})
+
+ for key, val := range m {
+ lkey := strings.ToLower(key)
+ switch v := val.(type) {
+ case map[interface{}]interface{}:
+ nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v))
+ case map[string]interface{}:
+ nm[lkey] = copyAndInsensitiviseMap(v)
+ default:
+ nm[lkey] = v
+ }
+ }
+
+ return nm
+}
+
+func insensitiviseMap(m map[string]interface{}) {
+ for key, val := range m {
+ switch val.(type) {
+ case map[interface{}]interface{}:
+ // nested map: cast and recursively insensitivise
+ val = cast.ToStringMap(val)
+ insensitiviseMap(val.(map[string]interface{}))
+ case map[string]interface{}:
+ // nested map: recursively insensitivise
+ insensitiviseMap(val.(map[string]interface{}))
+ }
+
+ lower := strings.ToLower(key)
+ if key != lower {
+ // remove old key (not lower-cased)
+ delete(m, key)
+ }
+ // update map
+ m[lower] = val
+ }
+}
+
+func absPathify(inPath string) string {
+ jww.INFO.Println("Trying to resolve absolute path to", inPath)
+
+ if strings.HasPrefix(inPath, "$HOME") {
+ inPath = userHomeDir() + inPath[5:]
+ }
+
+ if strings.HasPrefix(inPath, "$") {
+ end := strings.Index(inPath, string(os.PathSeparator))
+ inPath = os.Getenv(inPath[1:end]) + inPath[end:]
+ }
+
+ if filepath.IsAbs(inPath) {
+ return filepath.Clean(inPath)
+ }
+
+ p, err := filepath.Abs(inPath)
+ if err == nil {
+ return filepath.Clean(p)
+ }
+
+ jww.ERROR.Println("Couldn't discover absolute path")
+ jww.ERROR.Println(err)
+ return ""
+}
+
+// exists checks if a file or directory exists.
+func exists(fs afero.Fs, path string) (bool, error) {
+ _, err := fs.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+func userHomeDir() string {
+ if runtime.GOOS == "windows" {
+ home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+ if home == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ return home
+ }
+ return os.Getenv("HOME")
+}
+
+func safeMul(a, b uint) (uint, error) {
+ c := a * b
+ if a > 1 && b > 1 && c/b != a {
+ return 0, fmt.Errorf("multiplication overflows uint: %d*%d", a, b)
+ }
+ return c, nil
+}
+
+// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes
+func parseSizeInBytes(sizeStr string) (uint, error) {
+ rawStr := sizeStr
+ sizeStr = strings.TrimSpace(sizeStr)
+ lastChar := len(sizeStr) - 1
+ multiplier := uint(1)
+
+ if lastChar > 0 {
+ if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
+ if lastChar > 1 {
+ switch unicode.ToLower(rune(sizeStr[lastChar-1])) {
+ case 'k':
+ multiplier = 1 << 10
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ case 'm':
+ multiplier = 1 << 20
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ case 'g':
+ multiplier = 1 << 30
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ default:
+ multiplier = 1
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar])
+ }
+ }
+ }
+ }
+
+ num, err := cast.ToUintE(sizeStr)
+ if err != nil {
+ return 0, err
+ }
+
+ size, err := safeMul(num, multiplier)
+ if err != nil {
+ return 0, fmt.Errorf("unable to cast %q to uint: %s", rawStr, err)
+ }
+
+ return size, nil
+}
+
+// deepSearch scans deep maps, following the key indexes listed in the
+// sequence "path".
+// The last value is expected to be another map, and is returned.
+//
+// In case intermediate keys do not exist, or map to a non-map value,
+// a new map is created and inserted, and the search continues from there:
+// the initial map "m" may be modified!
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
+ for _, k := range path {
+ m2, ok := m[k]
+ if !ok {
+ // intermediate key does not exist
+ // => create it and continue from there
+ m3 := make(map[string]interface{})
+ m[k] = m3
+ m = m3
+ continue
+ }
+ m3, ok := m2.(map[string]interface{})
+ if !ok {
+ // intermediate key is a value
+ // => replace with a new map
+ m3 = make(map[string]interface{})
+ m[k] = m3
+ }
+ // continue search from here
+ m = m3
+ }
+ return m
+}
diff --git a/vendor/github.com/DataDog/viper/viper.go b/vendor/github.com/DataDog/viper/viper.go
new file mode 100644
index 0000000000..b77e682152
--- /dev/null
+++ b/vendor/github.com/DataDog/viper/viper.go
@@ -0,0 +1,2004 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+// Each item takes precedence over the item below it:
+
+// overrides
+// flag
+// env
+// config
+// key/value store
+// default
+
+package viper
+
+import (
+ "bytes"
+ "encoding/csv"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+
+ yaml "gopkg.in/yaml.v2"
+
+ "github.com/fsnotify/fsnotify"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/printer"
+ "github.com/magiconair/properties"
+ "github.com/mitchellh/mapstructure"
+ toml "github.com/pelletier/go-toml"
+ "github.com/spf13/afero"
+ "github.com/spf13/cast"
+ jww "github.com/spf13/jwalterweatherman"
+ "github.com/spf13/pflag"
+)
+
+// ConfigMarshalError happens when failing to marshal the configuration.
+type ConfigMarshalError struct {
+ err error
+}
+
+// Error returns the formatted configuration error.
+func (e ConfigMarshalError) Error() string {
+ return fmt.Sprintf("While marshaling config: %s", e.err.Error())
+}
+
+var v *Viper
+
+type RemoteResponse struct {
+ Value []byte
+ Error error
+}
+
+func init() {
+ v = New()
+}
+
+type remoteConfigFactory interface {
+ Get(rp RemoteProvider) (io.Reader, error)
+ Watch(rp RemoteProvider) (io.Reader, error)
+ WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
+}
+
+// RemoteConfig is optional, see the remote package
+var RemoteConfig remoteConfigFactory
+
+// UnsupportedConfigError denotes encountering an unsupported
+// configuration filetype.
+type UnsupportedConfigError string
+
+// Error returns the formatted configuration error.
+func (str UnsupportedConfigError) Error() string {
+ return fmt.Sprintf("Unsupported Config Type %q", string(str))
+}
+
+// UnsupportedRemoteProviderError denotes encountering an unsupported remote
+// provider. Currently only etcd and Consul are supported.
+type UnsupportedRemoteProviderError string
+
+// Error returns the formatted remote provider error.
+func (str UnsupportedRemoteProviderError) Error() string {
+ return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
+}
+
+// RemoteConfigError denotes encountering an error while trying to
+// pull the configuration from the remote provider.
+type RemoteConfigError string
+
+// Error returns the formatted remote provider error
+func (rce RemoteConfigError) Error() string {
+ return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
+}
+
+// ConfigFileNotFoundError denotes failing to find configuration file.
+type ConfigFileNotFoundError struct {
+ name, locations string
+}
+
+// Error returns the formatted configuration error.
+func (fnfe ConfigFileNotFoundError) Error() string {
+ return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
+}
+
+// A DecoderConfigOption can be passed to viper.Unmarshal to configure
+// mapstructure.DecoderConfig options
+type DecoderConfigOption func(*mapstructure.DecoderConfig)
+
+// DecodeHook returns a DecoderConfigOption which overrides the default
+// DecoderConfig.DecodeHook value, the default is:
+//
+// mapstructure.ComposeDecodeHookFunc(
+// mapstructure.StringToTimeDurationHookFunc(),
+// mapstructure.StringToSliceHookFunc(","),
+// )
+func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption {
+ return func(c *mapstructure.DecoderConfig) {
+ c.DecodeHook = hook
+ }
+}
+
+// Viper is a prioritized configuration registry. It
+// maintains a set of configuration sources, fetches
+// values to populate those, and provides them according
+// to the source's priority.
+// The priority of the sources is the following:
+// 1. overrides
+// 2. flags
+// 3. env. variables
+// 4. config file
+// 5. key/value store
+// 6. defaults
+//
+// For example, if values from the following sources were loaded:
+//
+// Defaults : {
+// "secret": "",
+// "user": "default",
+// "endpoint": "https://localhost"
+// }
+// Config : {
+// "user": "root"
+// "secret": "defaultsecret"
+// }
+// Env : {
+// "secret": "somesecretkey"
+// }
+//
+// The resulting config will have the following values:
+//
+// {
+// "secret": "somesecretkey",
+// "user": "root",
+// "endpoint": "https://localhost"
+// }
+type Viper struct {
+ // Delimiter that separates a list of keys
+ // used to access a nested value in one go
+ keyDelim string
+
+ // A set of paths to look for the config file in
+ configPaths []string
+
+ // The filesystem to read config from.
+ fs afero.Fs
+
+ // A set of remote providers to search for the configuration
+ remoteProviders []*defaultRemoteProvider
+
+ // Name of file to look for inside the path
+ configName string
+ configFile string
+ configType string
+ envPrefix string
+
+ automaticEnvApplied bool
+ envKeyReplacer *strings.Replacer
+ allowEmptyEnv bool
+
+ config map[string]interface{}
+ override map[string]interface{}
+ defaults map[string]interface{}
+ kvstore map[string]interface{}
+ pflags map[string]FlagValue
+ env map[string][]string
+ envTransform map[string]func(string) interface{}
+ aliases map[string]string
+ knownKeys map[string]interface{}
+ typeByDefValue bool
+
+ // Store read properties on the object so that we can write back in order with comments.
+ // This will only be used if the configuration read is a properties file.
+ properties *properties.Properties
+
+ onConfigChange func(fsnotify.Event)
+}
+
+// New returns an initialized Viper instance.
+func New() *Viper {
+ v := new(Viper)
+ v.keyDelim = "."
+ v.configName = "config"
+ v.fs = afero.NewOsFs()
+ v.config = make(map[string]interface{})
+ v.override = make(map[string]interface{})
+ v.defaults = make(map[string]interface{})
+ v.kvstore = make(map[string]interface{})
+ v.pflags = make(map[string]FlagValue)
+ v.env = make(map[string][]string)
+ v.envTransform = make(map[string]func(string) interface{})
+ v.aliases = make(map[string]string)
+ v.knownKeys = make(map[string]interface{})
+ v.typeByDefValue = false
+
+ return v
+}
+
+// Reset is intended for testing; it resets all state to default settings.
+// It is in the public interface for the viper package so applications
+// can use it in their testing as well.
+func Reset() {
+ v = New()
+ SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"}
+ SupportedRemoteProviders = []string{"etcd", "consul"}
+}
+
+type defaultRemoteProvider struct {
+ provider string
+ endpoint string
+ path string
+ secretKeyring string
+}
+
+func (rp defaultRemoteProvider) Provider() string {
+ return rp.provider
+}
+
+func (rp defaultRemoteProvider) Endpoint() string {
+ return rp.endpoint
+}
+
+func (rp defaultRemoteProvider) Path() string {
+ return rp.path
+}
+
+func (rp defaultRemoteProvider) SecretKeyring() string {
+ return rp.secretKeyring
+}
+
+// RemoteProvider stores the configuration necessary
+// to connect to a remote key/value store.
+// An optional secretKeyring to decrypt encrypted values
+// can be provided.
+type RemoteProvider interface {
+ Provider() string
+ Endpoint() string
+ Path() string
+ SecretKeyring() string
+}
+
+// SupportedExts are universally supported extensions.
+var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"}
+
+// SupportedRemoteProviders are universally supported remote providers.
+var SupportedRemoteProviders = []string{"etcd", "consul"}
+
+func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
+func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
+ v.onConfigChange = run
+}
+
+// SetConfigFile explicitly defines the path, name and extension of the config file.
+// Viper will use this and not check any of the config paths.
+func SetConfigFile(in string) { v.SetConfigFile(in) }
+func (v *Viper) SetConfigFile(in string) {
+ if in != "" {
+ v.configFile = in
+ }
+}
+
+// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
+// E.g. if your prefix is "spf", the env registry will look for env
+// variables that start with "SPF_".
+func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
+func (v *Viper) SetEnvPrefix(in string) {
+ if in != "" {
+ v.envPrefix = in
+ }
+}
+
+// SetEnvKeyTransformer allows defining a transformer function which decides
+// how an environment variable's value gets assigned to a key.
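+//
+// Illustrative sketch (the "tags" key and MYAPP_TAGS variable are placeholders,
+// not part of the API):
+//
+// BindEnv("tags", "MYAPP_TAGS")
+// SetEnvKeyTransformer("tags", func(in string) interface{} {
+//   return strings.Split(in, ",")
+// })
+//
+// With MYAPP_TAGS="a,b,c", Get("tags") then returns []string{"a", "b", "c"}.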
+func SetEnvKeyTransformer(key string, fn func(string) interface{}) { v.SetEnvKeyTransformer(key, fn) }
+func (v *Viper) SetEnvKeyTransformer(key string, fn func(string) interface{}) {
+ v.envTransform[strings.ToLower(key)] = fn
+}
+
+func (v *Viper) mergeWithEnvPrefix(in string) string {
+ if v.envPrefix != "" {
+ return strings.ToUpper(v.envPrefix + "_" + in)
+ }
+
+ return strings.ToUpper(in)
+}
+
+// AllowEmptyEnv tells Viper to consider set,
+// but empty environment variables as valid values instead of falling back.
+// For backward compatibility reasons this is false by default.
+func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) }
+func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) {
+ v.allowEmptyEnv = allowEmptyEnv
+}
+
+// TODO: should getEnv logic be moved into find(). Can generalize the use of
+// rewriting keys many things, Ex: Get('someKey') -> some_key
+// (camel case to snake case for JSON keys perhaps)
+
+// getEnv is a wrapper around os.Getenv which replaces characters in the original
+// key. This allows env vars which have different keys than the config object
+// keys.
+func (v *Viper) getEnv(key string) (string, bool) {
+ if v.envKeyReplacer != nil {
+ key = v.envKeyReplacer.Replace(key)
+ }
+
+ val, ok := os.LookupEnv(key)
+
+ return val, ok && (v.allowEmptyEnv || val != "")
+}
+
+// ConfigFileUsed returns the file used to populate the config registry.
+func ConfigFileUsed() string { return v.ConfigFileUsed() }
+func (v *Viper) ConfigFileUsed() string { return v.configFile }
+
+// AddConfigPath adds a path for Viper to search for the config file in.
+// Can be called multiple times to define multiple search paths.
+func AddConfigPath(in string) { v.AddConfigPath(in) }
+func (v *Viper) AddConfigPath(in string) {
+ if in != "" {
+ absin := absPathify(in)
+ jww.INFO.Println("adding", absin, "to paths to search")
+ if !stringInSlice(absin, v.configPaths) {
+ v.configPaths = append(v.configPaths, absin)
+ }
+ }
+}
+
+// AddRemoteProvider adds a remote configuration source.
+// Remote Providers are searched in the order they are added.
+// provider is a string value, "etcd" or "consul" are currently supported.
+// endpoint is the url. etcd requires http://ip:port consul requires ip:port
+// path is the path in the k/v store to retrieve configuration
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp"
+func AddRemoteProvider(provider, endpoint, path string) error {
+ return v.AddRemoteProvider(provider, endpoint, path)
+}
+func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
+ if !stringInSlice(provider, SupportedRemoteProviders) {
+ return UnsupportedRemoteProviderError(provider)
+ }
+ if provider != "" && endpoint != "" {
+ jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+ rp := &defaultRemoteProvider{
+ endpoint: endpoint,
+ provider: provider,
+ path: path,
+ }
+ if !v.providerPathExists(rp) {
+ v.remoteProviders = append(v.remoteProviders, rp)
+ }
+ }
+ return nil
+}
+
+// AddSecureRemoteProvider adds a remote configuration source.
+// Secure Remote Providers are searched in the order they are added.
+// provider is a string value, "etcd" or "consul" are currently supported.
+// endpoint is the url. etcd requires http://ip:port consul requires ip:port
+// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg
+// path is the path in the k/v store to retrieve configuration
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp"
+// Secure Remote Providers are implemented with github.com/xordataexchange/crypt
+func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+ return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
+}
+
+func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+ if !stringInSlice(provider, SupportedRemoteProviders) {
+ return UnsupportedRemoteProviderError(provider)
+ }
+ if provider != "" && endpoint != "" {
+ jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+ rp := &defaultRemoteProvider{
+ endpoint: endpoint,
+ provider: provider,
+ path: path,
+ secretKeyring: secretkeyring,
+ }
+ if !v.providerPathExists(rp) {
+ v.remoteProviders = append(v.remoteProviders, rp)
+ }
+ }
+ return nil
+}
+
+func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
+ for _, y := range v.remoteProviders {
+ if reflect.DeepEqual(y, p) {
+ return true
+ }
+ }
+ return false
+}
+
+// searchMap recursively searches for a value for path in source map.
+// Returns nil if not found.
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
+ if len(path) == 0 {
+ return source
+ }
+
+ next, ok := source[path[0]]
+ if ok {
+ // Fast path
+ if len(path) == 1 {
+ return next
+ }
+
+ // Nested case
+ switch next.(type) {
+ case map[interface{}]interface{}:
+ return v.searchMap(cast.ToStringMap(next), path[1:])
+ case map[string]interface{}:
+ // Type assertion is safe here since it is only reached
+ // if the type of `next` is the same as the type being asserted
+ return v.searchMap(next.(map[string]interface{}), path[1:])
+ default:
+ // got a value but nested key expected, return "nil" for not found
+ return nil
+ }
+ }
+ return nil
+}
+
+// searchMapWithPathPrefixes recursively searches for a value for path in source map.
+//
+// While searchMap() considers each path element as a single map key, this
+// function searches for, and prioritizes, merged path elements.
+// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
+// is also defined, this latter value is returned for path ["foo", "bar"].
+//
+// This should be useful only at config level (other maps may not contain dots
+// in their keys).
+//
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} {
+ if len(path) == 0 {
+ return source
+ }
+
+ // search for path prefixes, starting from the longest one
+ for i := len(path); i > 0; i-- {
+ prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim))
+
+ next, ok := source[prefixKey]
+ if ok {
+ // Fast path
+ if i == len(path) {
+ return next
+ }
+
+ // Nested case
+ var val interface{}
+ switch next.(type) {
+ case map[interface{}]interface{}:
+ val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:])
+ case map[string]interface{}:
+ // Type assertion is safe here since it is only reached
+ // if the type of `next` is the same as the type being asserted
+ val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:])
+ default:
+ // got a value but nested key expected, do nothing and look for next prefix
+ }
+ if val != nil {
+ return val
+ }
+ }
+ }
+
+ // not found
+ return nil
+}
+
+// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
+// on its path in the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+// "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
+ var parentVal interface{}
+ for i := 1; i < len(path); i++ {
+ parentVal = v.searchMap(m, path[0:i])
+ if parentVal == nil {
+ // not found, no need to add more path elements
+ return ""
+ }
+ switch parentVal.(type) {
+ case map[interface{}]interface{}:
+ continue
+ case map[string]interface{}:
+ continue
+ default:
+ // parentVal is a regular value which shadows "path"
+ return strings.Join(path[0:i], v.keyDelim)
+ }
+ }
+ return ""
+}
+
+// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
+// in a sub-path of the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+// "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
+ // unify input map
+ var m map[string]interface{}
+ switch mi.(type) {
+ case map[string]string, map[string]FlagValue:
+ m = cast.ToStringMap(mi)
+ default:
+ return ""
+ }
+
+ // scan paths
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if _, ok := m[parentKey]; ok {
+ return parentKey
+ }
+ }
+ return ""
+}
+
+// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
+// in the environment, when automatic env is on.
+// e.g., if "foo.bar" has a value in the environment, it “shadows”
+// "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok {
+ return parentKey
+ }
+ }
+ return ""
+}
+
+// SetTypeByDefaultValue enables or disables the inference of a key value's
+// type when the Get function is used based upon a key's default value as
+// opposed to the value returned based on the normal fetch logic.
+//
+// For example, if a key has a default value of []string{} and the same key
+// is set via an environment variable to "a b c", a call to the Get function
+// would return a string slice for the key if the key's type is inferred by
+// the default value and the Get function would return:
+//
+// []string {"a", "b", "c"}
+//
+// Otherwise the Get function would return:
+//
+// "a b c"
+func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
+func (v *Viper) SetTypeByDefaultValue(enable bool) {
+ v.typeByDefValue = enable
+}
+
+// GetViper gets the global Viper instance.
+func GetViper() *Viper {
+ return v
+}
+
+// Get can retrieve any value given the key to use.
+// Get is case-insensitive for a key.
+// Get has the behavior of returning the value associated with the first
+// place from where it is set. Viper will check in the following order:
+// override, flag, env, config file, key/value store, default
+//
+// Get returns an interface. For a specific value use one of the Get____ methods.
+func Get(key string) interface{} { return v.Get(key) }
+func (v *Viper) Get(key string) interface{} {
+ val, _ := v.GetE(key)
+ return val
+}
+
+// GetSkipDefault is like Get but ignores default values. For a specific value use one of the Get____ methods.
+func GetSkipDefault(key string) interface{} { return v.GetSkipDefault(key) }
+func (v *Viper) GetSkipDefault(key string) interface{} {
+ val, _ := v.GetESkipDefault(key)
+ return val
+}
+
+// GetE is like Get but also returns parsing errors.
+func GetE(key string) (interface{}, error) { return v.GetE(key) }
+func (v *Viper) GetE(key string) (interface{}, error) {
+ lcaseKey := strings.ToLower(key)
+ val := v.find(lcaseKey, false)
+ if val == nil {
+ return nil, nil
+ }
+ return v.castByDefValue(lcaseKey, val)
+}
+
+// GetESkipDefault is like GetE but ignores defaults.
+func GetESkipDefault(key string) (interface{}, error) { return v.GetESkipDefault(key) }
+func (v *Viper) GetESkipDefault(key string) (interface{}, error) {
+ lcaseKey := strings.ToLower(key)
+ val := v.find(lcaseKey, true)
+ if val == nil {
+ return nil, nil
+ }
+ return v.castByDefValue(lcaseKey, val)
+}
+
+func (v *Viper) castByDefValue(lcaseKey string, val interface{}) (interface{}, error) {
+ if v.typeByDefValue {
+ // TODO(bep) this branch isn't covered by a single test.
+ valType := val
+ path := strings.Split(lcaseKey, v.keyDelim)
+ defVal := v.searchMap(v.defaults, path)
+ if defVal != nil {
+ valType = defVal
+ }
+
+ switch valType.(type) {
+ case bool:
+ return cast.ToBoolE(val)
+ case string:
+ return cast.ToStringE(val)
+ case int32, int16, int8, int:
+ return cast.ToIntE(val)
+ case int64:
+ return cast.ToInt64E(val)
+ case float64, float32:
+ return cast.ToFloat64E(val)
+ case time.Time:
+ return cast.ToTimeE(val)
+ case time.Duration:
+ return cast.ToDurationE(val)
+ case []string:
+ return cast.ToStringSliceE(val)
+ }
+ }
+
+ return val, nil
+}
+
+// GetRaw is the same as Get except that it always returns an uncast value.
+func GetRaw(key string) interface{} { return v.GetRaw(key) }
+func (v *Viper) GetRaw(key string) interface{} {
+ lcaseKey := strings.ToLower(key)
+ return v.find(lcaseKey, false)
+}
+
+// Sub returns new Viper instance representing a sub tree of this instance.
+// Sub is case-insensitive for a key.
+func Sub(key string) *Viper { return v.Sub(key) }
+func (v *Viper) Sub(key string) *Viper {
+ subv := New()
+ data := v.Get(key)
+ if data == nil {
+ return nil
+ }
+
+ if reflect.TypeOf(data).Kind() == reflect.Map {
+ subv.config = cast.ToStringMap(data)
+ return subv
+ }
+ return nil
+}
+
+// GetString returns the value associated with the key as a string.
+func GetString(key string) string { return v.GetString(key) }
+func (v *Viper) GetString(key string) string {
+ return cast.ToString(v.Get(key))
+}
+
+// GetStringE is the same as GetString but also returns parsing errors.
+func GetStringE(key string) (string, error) { return v.GetStringE(key) }
+func (v *Viper) GetStringE(key string) (string, error) {
+ return cast.ToStringE(v.GetRaw(key))
+}
+
+// GetBool returns the value associated with the key as a boolean.
+func GetBool(key string) bool { return v.GetBool(key) }
+func (v *Viper) GetBool(key string) bool {
+ return cast.ToBool(v.Get(key))
+}
+
+// GetBoolE is the same as GetBool but also returns parsing errors.
+func GetBoolE(key string) (bool, error) { return v.GetBoolE(key) }
+func (v *Viper) GetBoolE(key string) (bool, error) {
+ return cast.ToBoolE(v.GetRaw(key))
+}
+
+// GetInt returns the value associated with the key as an integer.
+func GetInt(key string) int { return v.GetInt(key) }
+func (v *Viper) GetInt(key string) int {
+ return cast.ToInt(v.Get(key))
+}
+
+// GetIntE is the same as GetInt but also returns parsing errors.
+func GetIntE(key string) (int, error) { return v.GetIntE(key) }
+func (v *Viper) GetIntE(key string) (int, error) {
+ return cast.ToIntE(v.GetRaw(key))
+}
+
+// GetInt32 returns the value associated with the key as an integer.
+func GetInt32(key string) int32 { return v.GetInt32(key) }
+func (v *Viper) GetInt32(key string) int32 {
+ return cast.ToInt32(v.Get(key))
+}
+
+// GetInt32E is the same as GetInt32 but also returns parsing errors.
+func GetInt32E(key string) (int32, error) { return v.GetInt32E(key) }
+func (v *Viper) GetInt32E(key string) (int32, error) {
+ return cast.ToInt32E(v.GetRaw(key))
+}
+
+// GetInt64 returns the value associated with the key as an integer.
+func GetInt64(key string) int64 { return v.GetInt64(key) }
+func (v *Viper) GetInt64(key string) int64 {
+ return cast.ToInt64(v.Get(key))
+}
+
+// GetInt64E is the same as GetInt64 but also returns parsing errors.
+func GetInt64E(key string) (int64, error) { return v.GetInt64E(key) }
+func (v *Viper) GetInt64E(key string) (int64, error) {
+ return cast.ToInt64E(v.GetRaw(key))
+}
+
+// GetFloat64 returns the value associated with the key as a float64.
+func GetFloat64(key string) float64 { return v.GetFloat64(key) }
+func (v *Viper) GetFloat64(key string) float64 {
+ return cast.ToFloat64(v.GetRaw(key))
+}
+
+// GetFloat64E is the same as GetFloat64 but also returns parsing errors.
+func GetFloat64E(key string) (float64, error) { return v.GetFloat64E(key) }
+func (v *Viper) GetFloat64E(key string) (float64, error) {
+ return cast.ToFloat64E(v.GetRaw(key))
+}
+
+// GetTime returns the value associated with the key as time.
+func GetTime(key string) time.Time { return v.GetTime(key) }
+func (v *Viper) GetTime(key string) time.Time {
+ return cast.ToTime(v.Get(key))
+}
+
+// GetTimeE is the same as GetTime but also returns parsing errors.
+func GetTimeE(key string) (time.Time, error) { return v.GetTimeE(key) }
+func (v *Viper) GetTimeE(key string) (time.Time, error) {
+ return cast.ToTimeE(v.GetRaw(key))
+}
+
+// GetDuration returns the value associated with the key as a duration.
+func GetDuration(key string) time.Duration { return v.GetDuration(key) }
+func (v *Viper) GetDuration(key string) time.Duration {
+ return cast.ToDuration(v.Get(key))
+}
+
+// GetDurationE is the same as GetDuration but also returns parsing errors.
+func GetDurationE(key string) (time.Duration, error) { return v.GetDurationE(key) }
+func (v *Viper) GetDurationE(key string) (time.Duration, error) {
+ return cast.ToDurationE(v.GetRaw(key))
+}
+
+// GetStringSlice returns the value associated with the key as a slice of strings.
+func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
+func (v *Viper) GetStringSlice(key string) []string {
+ return cast.ToStringSlice(v.Get(key))
+}
+
+// GetStringSliceE is the same as GetStringSlice but also returns parsing errors.
+func GetStringSliceE(key string) ([]string, error) { return v.GetStringSliceE(key) }
+func (v *Viper) GetStringSliceE(key string) ([]string, error) {
+ return cast.ToStringSliceE(v.GetRaw(key))
+}
+
+// GetStringMap returns the value associated with the key as a map of interfaces.
+func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
+func (v *Viper) GetStringMap(key string) map[string]interface{} {
+ return cast.ToStringMap(v.Get(key))
+}
+
+// GetStringMapE is the same as GetStringMap but also returns parsing errors.
+func GetStringMapE(key string) (map[string]interface{}, error) { return v.GetStringMapE(key) }
+func (v *Viper) GetStringMapE(key string) (map[string]interface{}, error) {
+ return cast.ToStringMapE(v.GetRaw(key))
+}
+
+// GetStringMapString returns the value associated with the key as a map of strings.
+func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
+func (v *Viper) GetStringMapString(key string) map[string]string {
+ return cast.ToStringMapString(v.Get(key))
+}
+
+// GetStringMapStringE is the same as GetStringMapString but also returns parsing errors.
+func GetStringMapStringE(key string) (map[string]string, error) { return v.GetStringMapStringE(key) }
+func (v *Viper) GetStringMapStringE(key string) (map[string]string, error) {
+ return cast.ToStringMapStringE(v.GetRaw(key))
+}
+
+// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
+func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
+func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
+ return cast.ToStringMapStringSlice(v.Get(key))
+}
+
+// GetStringMapStringSliceE is the same as GetStringMapStringSlice but also returns parsing errors.
+func GetStringMapStringSliceE(key string) (map[string][]string, error) {
+ return v.GetStringMapStringSliceE(key)
+}
+func (v *Viper) GetStringMapStringSliceE(key string) (map[string][]string, error) {
+ return cast.ToStringMapStringSliceE(v.GetRaw(key))
+}
+
+// GetSizeInBytes returns the size of the value associated with the given key
+// in bytes.
+func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
+func (v *Viper) GetSizeInBytes(key string) uint {
+ sizeStr := cast.ToString(v.Get(key))
+ size, _ := parseSizeInBytes(sizeStr)
+ return size
+}
+
+// GetSizeInBytesE is the same as GetSizeInBytes but also returns parsing errors.
+func GetSizeInBytesE(key string) (uint, error) { return v.GetSizeInBytesE(key) }
+func (v *Viper) GetSizeInBytesE(key string) (uint, error) {
+ sizeStr, err := cast.ToStringE(v.GetRaw(key))
+ if err != nil {
+ return 0, err
+ }
+ return parseSizeInBytes(sizeStr)
+}
+
+// UnmarshalKey takes a single key and unmarshals it into a Struct.
+func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
+ return v.UnmarshalKey(key, rawVal, opts...)
+}
+func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
+ err := decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Unmarshal unmarshals the config into a Struct. Make sure that the tags
+// on the fields of the structure are properly set.
+func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
+ return v.Unmarshal(rawVal, opts...)
+}
+func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
+ err := decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// defaultDecoderConfig returns the default mapstructure.DecoderConfig with support
+// for time.Duration values & string slices
+func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig {
+ c := &mapstructure.DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ WeaklyTypedInput: true,
+ DecodeHook: mapstructure.ComposeDecodeHookFunc(
+ mapstructure.StringToTimeDurationHookFunc(),
+ mapstructure.StringToSliceHookFunc(","),
+ ),
+ }
+ for _, opt := range opts {
+ opt(c)
+ }
+ return c
+}
+
+// decode is a wrapper around mapstructure.Decode that mimics the WeakDecode functionality
+func decode(input interface{}, config *mapstructure.DecoderConfig) error {
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return err
+ }
+ return decoder.Decode(input)
+}
+
+// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
+// in the destination struct.
+func (v *Viper) UnmarshalExact(rawVal interface{}) error {
+ config := defaultDecoderConfig(rawVal)
+ config.ErrorUnused = true
+
+ err := decode(v.AllSettings(), config)
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// BindPFlags binds a full flag set to the configuration, using each flag's long
+// name as the config key.
+func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
+func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
+ return v.BindFlagValues(pflagValueSet{flags})
+}
+
+// BindPFlag binds a specific key to a pflag (as used by cobra).
+// Example (where serverCmd is a Cobra instance):
+//
+// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+//
+func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
+func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
+ return v.BindFlagValue(key, pflagValue{flag})
+}
+
+// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
+// name as the config key.
+func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
+func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
+ flags.VisitAll(func(flag FlagValue) {
+ if err = v.BindFlagValue(flag.Name(), flag); err != nil {
+ return
+ }
+ })
+ return nil
+}
+
+// BindFlagValue binds a specific key to a FlagValue.
+// Example (where serverCmd is a Cobra instance):
+//
+// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+// Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port"))
+//
+func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
+func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
+ if flag == nil {
+ return fmt.Errorf("flag for %q is nil", key)
+ }
+ v.pflags[strings.ToLower(key)] = flag
+ return nil
+}
+
+// BindEnv binds a Viper key to an ENV variable.
+// ENV variables are case sensitive.
+// If only a key is provided, it will use the env key matching the key, uppercased.
+// If set, EnvPrefix will be prepended when no explicit env name is provided.
+func BindEnv(input ...string) error { return v.BindEnv(input...) }
+func (v *Viper) BindEnv(input ...string) error {
+ if len(input) == 0 {
+ return fmt.Errorf("BindEnv missing key to bind to")
+ }
+
+ key := strings.ToLower(input[0])
+ var envkeys []string
+
+ if len(input) == 1 {
+ envkeys = []string{v.mergeWithEnvPrefix(key)}
+ } else {
+ envkeys = input[1:]
+ }
+
+ v.env[key] = append(v.env[key], envkeys...)
+ v.SetKnown(key)
+
+ return nil
+}
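+
+// Example (illustrative; the prefix, keys and variable names are hypothetical,
+// and SetEnvPrefix is assumed to be defined elsewhere in this package):
+//
+// v.SetEnvPrefix("myapp")
+// _ = v.BindEnv("log_level")               // looked up as MYAPP_LOG_LEVEL
+// _ = v.BindEnv("site", "DD_SITE", "SITE") // explicit env names, checked in order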
+
+// Given a key, find the value.
+// Viper will check in the following order:
+// flag, env, config file, key/value store, default.
+// If skipDefault is set to true, find will ignore default values.
+// Viper will check to see if an alias exists first.
+// Note: this assumes a lower-cased key given.
+func (v *Viper) find(lcaseKey string, skipDefault bool) interface{} {
+
+ var (
+ val interface{}
+ exists bool
+ path = strings.Split(lcaseKey, v.keyDelim)
+ nested = len(path) > 1
+ )
+
+ // compute the path through the nested maps to the nested value
+ if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
+ return nil
+ }
+
+ // if the requested key is an alias, then return the proper key
+ lcaseKey = v.realKey(lcaseKey)
+ path = strings.Split(lcaseKey, v.keyDelim)
+ nested = len(path) > 1
+
+ // Set() override first
+ val = v.searchMap(v.override, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
+ return nil
+ }
+
+ // PFlag override next
+ flag, exists := v.pflags[lcaseKey]
+ if exists && flag.HasChanged() {
+ switch flag.ValueType() {
+ case "int", "int8", "int16", "int32", "int64":
+ return cast.ToInt(flag.ValueString())
+ case "bool":
+ return cast.ToBool(flag.ValueString())
+ case "stringSlice":
+ s := strings.TrimPrefix(flag.ValueString(), "[")
+ s = strings.TrimSuffix(s, "]")
+ res, _ := readAsCSV(s)
+ return res
+ default:
+ return flag.ValueString()
+ }
+ }
+ if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
+ return nil
+ }
+
+ // Env override next
+ if v.automaticEnvApplied {
+ // even if it hasn't been registered, if automaticEnv is used,
+ // check any Get request
+ if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok {
+ return val
+ }
+ if nested && v.isPathShadowedInAutoEnv(path) != "" {
+ return nil
+ }
+ }
+ envkeys, exists := v.env[lcaseKey]
+ if exists {
+ for _, key := range envkeys {
+ if val, ok := v.getEnv(key); ok {
+ if fn, ok := v.envTransform[lcaseKey]; ok {
+ return fn(val)
+ }
+ return val
+ }
+ }
+ }
+ if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
+ return nil
+ }
+
+ // Config file next
+ val = v.searchMapWithPathPrefixes(v.config, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
+ return nil
+ }
+
+ // K/V store next
+ val = v.searchMap(v.kvstore, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
+ return nil
+ }
+
+ // Default next
+ if !skipDefault {
+ val = v.searchMap(v.defaults, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
+ return nil
+ }
+ }
+
+ // last chance: if no other value is returned and a flag does exist for the value,
+ // get the flag's value even if the flag's value has not changed
+ if flag, exists := v.pflags[lcaseKey]; exists {
+ switch flag.ValueType() {
+ case "int", "int8", "int16", "int32", "int64":
+ return cast.ToInt(flag.ValueString())
+ case "bool":
+ return cast.ToBool(flag.ValueString())
+ case "stringSlice":
+ s := strings.TrimPrefix(flag.ValueString(), "[")
+ s = strings.TrimSuffix(s, "]")
+ res, _ := readAsCSV(s)
+ return res
+ default:
+ return flag.ValueString()
+ }
+ }
+ // last item, no need to check shadowing
+
+ return nil
+}
+
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+// IsSet checks to see if the key has been set in any of the data locations.
+// IsSet is case-insensitive for a key.
+func IsSet(key string) bool { return v.IsSet(key) }
+func (v *Viper) IsSet(key string) bool {
+ lcaseKey := strings.ToLower(key)
+ val := v.find(lcaseKey, false)
+ return val != nil
+}
+
+// AutomaticEnv has Viper check ENV variables for all
+// keys set in config, defaults & flags.
+func AutomaticEnv() { v.AutomaticEnv() }
+func (v *Viper) AutomaticEnv() {
+ v.automaticEnvApplied = true
+}
+
+// SetEnvKeyReplacer sets the strings.Replacer on the viper object.
+// Useful for mapping an environment variable to a key that does
+// not match it.
+func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
+func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
+ v.envKeyReplacer = r
+}
+
+// RegisterAlias provides another accessor (an alias) for the same key.
+// This enables one to change a name without breaking the application.
+func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
+func (v *Viper) RegisterAlias(alias string, key string) {
+ v.registerAlias(alias, strings.ToLower(key))
+}
+
+func (v *Viper) registerAlias(alias string, key string) {
+ alias = strings.ToLower(alias)
+ if alias != key && alias != v.realKey(key) {
+ _, exists := v.aliases[alias]
+
+ if !exists {
+ // if we alias something that exists in one of the maps to another
+ // name, we'll never be able to get that value using the original
+ // name, so move the config value to the new realkey.
+ if val, ok := v.config[alias]; ok {
+ delete(v.config, alias)
+ v.config[key] = val
+ }
+ if val, ok := v.kvstore[alias]; ok {
+ delete(v.kvstore, alias)
+ v.kvstore[key] = val
+ }
+ if val, ok := v.defaults[alias]; ok {
+ delete(v.defaults, alias)
+ v.defaults[key] = val
+ }
+ if val, ok := v.override[alias]; ok {
+ delete(v.override, alias)
+ v.override[key] = val
+ }
+ v.aliases[alias] = key
+ }
+ } else {
+ jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
+ }
+ v.SetKnown(alias)
+}
+
+func (v *Viper) realKey(key string) string {
+ newkey, exists := v.aliases[key]
+ if exists {
+ jww.DEBUG.Println("Alias", key, "to", newkey)
+ return v.realKey(newkey)
+ }
+ return key
+}
+
+// InConfig checks to see if the given key (or an alias) is in the config file.
+func InConfig(key string) bool { return v.InConfig(key) }
+func (v *Viper) InConfig(key string) bool {
+ // if the requested key is an alias, then return the proper key
+ key = v.realKey(key)
+
+ _, exists := v.config[key]
+ return exists
+}
+
+// SetDefault sets the default value for this key.
+// SetDefault is case-insensitive for a key.
+// The default is only used when no value is provided by the user via flag, config or ENV.
+func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
+func (v *Viper) SetDefault(key string, value interface{}) {
+ // If alias passed in, then set the proper default
+ key = v.realKey(strings.ToLower(key))
+ value = toCaseInsensitiveValue(value)
+
+ path := strings.Split(key, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
+
+ // set innermost value
+ deepestMap[lastKey] = value
+ v.SetKnown(key)
+}
+
+// SetKnown adds a key to the set of known valid config keys
+func SetKnown(key string) { v.SetKnown(key) }
+func (v *Viper) SetKnown(key string) {
+ key = strings.ToLower(key)
+ splitPath := strings.Split(key, v.keyDelim)
+ for j := range splitPath {
+ subKey := strings.Join(splitPath[:j+1], v.keyDelim)
+ v.knownKeys[subKey] = struct{}{}
+ }
+}
+
+// GetKnownKeys returns all the keys that meet at least one of these criteria:
+// 1) have a default, 2) have an environment variable bound, 3) are an alias or 4) have been SetKnown()
+func GetKnownKeys() map[string]interface{} { return v.GetKnownKeys() }
+func (v *Viper) GetKnownKeys() map[string]interface{} {
+ ret := make(map[string]interface{})
+ for key, value := range v.knownKeys {
+ ret[key] = value
+ }
+ return ret
+}
+
+// IsKnown returns whether the given key has been set as a known key
+func IsKnown(key string) bool { return v.IsKnown(key) }
+func (v *Viper) IsKnown(key string) bool {
+ key = strings.ToLower(key)
+ _, exists := v.knownKeys[key]
+ return exists
+}
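+
+// Example (illustrative; the key is hypothetical): SetKnown also marks every
+// intermediate prefix of the key as known:
+//
+// v.SetKnown("apm_config.enabled")
+// v.IsKnown("apm_config")         // true
+// v.IsKnown("apm_config.enabled") // true
+// v.IsKnown("apm_config.other")   // false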
+
+// Set sets the value for the key in the override register.
+// Set is case-insensitive for a key.
+// Will be used instead of values obtained via
+// flags, config file, ENV, default, or key/value store.
+func Set(key string, value interface{}) { v.Set(key, value) }
+func (v *Viper) Set(key string, value interface{}) {
+ // If alias passed in, then set the proper override
+ key = v.realKey(strings.ToLower(key))
+ value = toCaseInsensitiveValue(value)
+
+ path := strings.Split(key, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(v.override, path[0:len(path)-1])
+
+ // set innermost value
+ deepestMap[lastKey] = value
+}
+
+// ReadInConfig will discover and load the configuration file from disk
+// and key/value stores, searching in one of the defined paths.
+func ReadInConfig() error { return v.ReadInConfig() }
+func (v *Viper) ReadInConfig() error {
+ jww.INFO.Println("Attempting to read in config file")
+ filename, err := v.getConfigFile()
+ if err != nil {
+ return err
+ }
+
+ if !stringInSlice(v.getConfigType(), SupportedExts) {
+ return UnsupportedConfigError(v.getConfigType())
+ }
+
+ jww.DEBUG.Println("Reading file: ", filename)
+ file, err := afero.ReadFile(v.fs, filename)
+ if err != nil {
+ return err
+ }
+
+ config := make(map[string]interface{})
+
+ err = v.unmarshalReader(bytes.NewReader(file), config)
+ if err != nil {
+ return err
+ }
+
+ v.config = config
+ return nil
+}
+
+// MergeInConfig merges a new configuration with an existing config.
+func MergeInConfig() error { return v.MergeInConfig() }
+func (v *Viper) MergeInConfig() error {
+ jww.INFO.Println("Attempting to merge in config file")
+ filename, err := v.getConfigFile()
+ if err != nil {
+ return err
+ }
+
+ if !stringInSlice(v.getConfigType(), SupportedExts) {
+ return UnsupportedConfigError(v.getConfigType())
+ }
+
+ file, err := afero.ReadFile(v.fs, filename)
+ if err != nil {
+ return err
+ }
+
+ return v.MergeConfig(bytes.NewReader(file))
+}
+
+// ReadConfig will read a configuration file, setting existing keys to nil if the
+// key does not exist in the file.
+func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
+func (v *Viper) ReadConfig(in io.Reader) error {
+ v.config = make(map[string]interface{})
+ return v.unmarshalReader(in, v.config)
+}
+
+// MergeConfig merges a new configuration with an existing config.
+func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
+func (v *Viper) MergeConfig(in io.Reader) error {
+ cfg := make(map[string]interface{})
+ if err := v.unmarshalReader(in, cfg); err != nil {
+ return err
+ }
+ return v.MergeConfigMap(cfg)
+}
+
+// MergeConfigMap merges the configuration from the map given with an existing config.
+// Note that the map given may be modified.
+func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) }
+func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error {
+ if v.config == nil {
+ v.config = make(map[string]interface{})
+ }
+ insensitiviseMap(cfg)
+ mergeMaps(cfg, v.config, nil)
+ return nil
+}
+
+// MergeConfigOverride merges a new configuration into the config at the
+// highest level of priority (similar to the 'Set' method). Keys set here will
+// always be retrieved before values from env, files, etc.
+func MergeConfigOverride(in io.Reader) error { return v.MergeConfigOverride(in) }
+func (v *Viper) MergeConfigOverride(in io.Reader) error {
+ if v.override == nil {
+ v.override = make(map[string]interface{})
+ }
+ cfg := make(map[string]interface{})
+ if err := v.unmarshalReader(in, cfg); err != nil {
+ return err
+ }
+ insensitiviseMap(cfg)
+ mergeMaps(cfg, v.override, nil)
+ return nil
+}
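+
+// Example (illustrative; the key and value are hypothetical, and the config type
+// must already be set for the reader to be parsed):
+//
+// v.SetConfigType("yaml")
+// _ = v.MergeConfigOverride(strings.NewReader("log_level: debug"))
+// v.Get("log_level") // "debug", regardless of file or env values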
+
+// WriteConfig writes the current configuration to a file.
+func WriteConfig() error { return v.WriteConfig() }
+func (v *Viper) WriteConfig() error {
+ filename, err := v.getConfigFile()
+ if err != nil {
+ return err
+ }
+ return v.writeConfig(filename, true)
+}
+
+// SafeWriteConfig writes current configuration to file only if the file does not exist.
+func SafeWriteConfig() error { return v.SafeWriteConfig() }
+func (v *Viper) SafeWriteConfig() error {
+ filename, err := v.getConfigFile()
+ if err != nil {
+ return err
+ }
+ return v.writeConfig(filename, false)
+}
+
+// WriteConfigAs writes current configuration to a given filename.
+func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) }
+func (v *Viper) WriteConfigAs(filename string) error {
+ return v.writeConfig(filename, true)
+}
+
+// SafeWriteConfigAs writes current configuration to a given filename if it does not exist.
+func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) }
+func (v *Viper) SafeWriteConfigAs(filename string) error {
+ return v.writeConfig(filename, false)
+}
+
+func writeConfig(filename string, force bool) error { return v.writeConfig(filename, force) }
+func (v *Viper) writeConfig(filename string, force bool) error {
+ jww.INFO.Println("Attempting to write configuration to file.")
+ ext := filepath.Ext(filename)
+ if len(ext) <= 1 {
+ return fmt.Errorf("Filename: %s requires valid extension.", filename)
+ }
+ configType := ext[1:]
+ if !stringInSlice(configType, SupportedExts) {
+ return UnsupportedConfigError(configType)
+ }
+ if v.config == nil {
+ v.config = make(map[string]interface{})
+ }
+ var flags int
+ if force == true {
+ flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY
+ } else {
+ if _, err := os.Stat(filename); os.IsNotExist(err) {
+ flags = os.O_WRONLY
+ } else {
+ return fmt.Errorf("File: %s exists. Use WriteConfig to overwrite.", filename)
+ }
+ }
+ f, err := v.fs.OpenFile(filename, flags, os.FileMode(0644))
+ if err != nil {
+ return err
+ }
+ return v.marshalWriter(f, configType)
+}
+
+// Unmarshal a Reader into a map.
+// Should probably be an unexported function.
+func unmarshalReader(in io.Reader, c map[string]interface{}) error {
+ return v.unmarshalReader(in, c)
+}
+func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(in)
+
+ switch strings.ToLower(v.getConfigType()) {
+ case "yaml", "yml":
+ // Try UnmarshalStrict first, so we can warn about duplicated keys
+ if strictErr := yaml.UnmarshalStrict(buf.Bytes(), &c); strictErr != nil {
+ if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil {
+ return ConfigParseError{err}
+ }
+ log.Printf("warning reading config file: %v\n", strictErr)
+ }
+
+ case "json":
+ if err := json.Unmarshal(buf.Bytes(), &c); err != nil {
+ return ConfigParseError{err}
+ }
+
+ case "hcl":
+ obj, err := hcl.Parse(string(buf.Bytes()))
+ if err != nil {
+ return ConfigParseError{err}
+ }
+ if err = hcl.DecodeObject(&c, obj); err != nil {
+ return ConfigParseError{err}
+ }
+
+ case "toml":
+ tree, err := toml.LoadReader(buf)
+ if err != nil {
+ return ConfigParseError{err}
+ }
+ tmap := tree.ToMap()
+ for k, v := range tmap {
+ c[k] = v
+ }
+
+ case "properties", "props", "prop":
+ v.properties = properties.NewProperties()
+ var err error
+ if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
+ return ConfigParseError{err}
+ }
+ for _, key := range v.properties.Keys() {
+ value, _ := v.properties.Get(key)
+ // recursively build nested maps
+ path := strings.Split(key, ".")
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(c, path[0:len(path)-1])
+ // set innermost value
+ deepestMap[lastKey] = value
+ }
+ }
+
+ insensitiviseMap(c)
+ return nil
+}
+
+// Marshal a map into Writer.
+func marshalWriter(f afero.File, configType string) error {
+ return v.marshalWriter(f, configType)
+}
+func (v *Viper) marshalWriter(f afero.File, configType string) error {
+ c := v.AllSettings()
+ switch configType {
+ case "json":
+ b, err := json.MarshalIndent(c, "", " ")
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+ _, err = f.WriteString(string(b))
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+
+ case "hcl":
+ b, err := json.Marshal(c)
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+ ast, err := hcl.Parse(string(b))
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+ err = printer.Fprint(f, ast.Node)
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+
+ case "prop", "props", "properties":
+ if v.properties == nil {
+ v.properties = properties.NewProperties()
+ }
+ p := v.properties
+ for _, key := range v.AllKeys() {
+ _, _, err := p.Set(key, v.GetString(key))
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+ }
+ _, err := p.WriteComment(f, "#", properties.UTF8)
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+
+ case "toml":
+ t, err := toml.TreeFromMap(c)
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+ s := t.String()
+ if _, err := f.WriteString(s); err != nil {
+ return ConfigMarshalError{err}
+ }
+
+ case "yaml", "yml":
+ b, err := yaml.Marshal(c)
+ if err != nil {
+ return ConfigMarshalError{err}
+ }
+ if _, err = f.WriteString(string(b)); err != nil {
+ return ConfigMarshalError{err}
+ }
+ }
+ return nil
+}
+
+func keyExists(k string, m map[string]interface{}) string {
+ lk := strings.ToLower(k)
+ for mk := range m {
+ lmk := strings.ToLower(mk)
+ if lmk == lk {
+ return mk
+ }
+ }
+ return ""
+}
+
+func castToMapStringInterface(
+ src map[interface{}]interface{}) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[fmt.Sprintf("%v", k)] = v
+ }
+ return tgt
+}
+
+func castMapStringSliceToMapInterface(src map[string][]string) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[k] = v
+ }
+ return tgt
+}
+
+func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[k] = v
+ }
+ return tgt
+}
+
+func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[k] = v
+ }
+ return tgt
+}
+
+// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
+// insistence on parsing nested structures as `map[interface{}]interface{}`
+// instead of using a `string` as the key for nested structures beyond one level
+// deep. Both map types are supported as there is a go-yaml fork that uses
+// `map[string]interface{}` instead.
+func mergeMaps(
+ src, tgt map[string]interface{}, itgt map[interface{}]interface{}) {
+ for sk, sv := range src {
+ tk := keyExists(sk, tgt)
+ if tk == "" {
+ jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv)
+ tgt[sk] = sv
+ if itgt != nil {
+ itgt[sk] = sv
+ }
+ continue
+ }
+
+ tv, ok := tgt[tk]
+ if !ok {
+ jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv)
+ tgt[sk] = sv
+ if itgt != nil {
+ itgt[sk] = sv
+ }
+ continue
+ }
+
+ svType := reflect.TypeOf(sv)
+ tvType := reflect.TypeOf(tv)
+ if svType != tvType {
+ jww.ERROR.Printf(
+ "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+ sk, svType, tvType, sv, tv)
+ continue
+ }
+
+ jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+ sk, svType, tvType, sv, tv)
+
+ switch ttv := tv.(type) {
+ case map[interface{}]interface{}:
+ jww.TRACE.Printf("merging maps (must convert)")
+ tsv := sv.(map[interface{}]interface{})
+ ssv := castToMapStringInterface(tsv)
+ stv := castToMapStringInterface(ttv)
+ mergeMaps(ssv, stv, ttv)
+ case map[string]interface{}:
+ jww.TRACE.Printf("merging maps")
+ mergeMaps(sv.(map[string]interface{}), ttv, nil)
+ default:
+ jww.TRACE.Printf("setting value")
+ tgt[tk] = sv
+ if itgt != nil {
+ itgt[tk] = sv
+ }
+ }
+ }
+}
+
+// ReadRemoteConfig attempts to get configuration from a remote source
+// and read it in the remote configuration registry.
+func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
+func (v *Viper) ReadRemoteConfig() error {
+ return v.getKeyValueConfig()
+}
+
+func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
+func (v *Viper) WatchRemoteConfig() error {
+ return v.watchKeyValueConfig()
+}
+
+func (v *Viper) WatchRemoteConfigOnChannel() error {
+ return v.watchKeyValueConfigOnChannel()
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) getKeyValueConfig() error {
+ if RemoteConfig == nil {
+ return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
+ }
+
+ for _, rp := range v.remoteProviders {
+ val, err := v.getRemoteConfig(rp)
+ if err != nil {
+ continue
+ }
+ v.kvstore = val
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+ reader, err := RemoteConfig.Get(provider)
+ if err != nil {
+ return nil, err
+ }
+ err = v.unmarshalReader(reader, v.kvstore)
+ return v.kvstore, err
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) watchKeyValueConfigOnChannel() error {
+ for _, rp := range v.remoteProviders {
+ respc, _ := RemoteConfig.WatchChannel(rp)
+ // TODO: add a quit channel
+ go func(rc <-chan *RemoteResponse) {
+ for {
+ b := <-rc
+ reader := bytes.NewReader(b.Value)
+ v.unmarshalReader(reader, v.kvstore)
+ }
+ }(respc)
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) watchKeyValueConfig() error {
+ for _, rp := range v.remoteProviders {
+ val, err := v.watchRemoteConfig(rp)
+ if err != nil {
+ continue
+ }
+ v.kvstore = val
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+ reader, err := RemoteConfig.Watch(provider)
+ if err != nil {
+ return nil, err
+ }
+ err = v.unmarshalReader(reader, v.kvstore)
+ return v.kvstore, err
+}
+
+// AllKeys returns all keys holding a value, regardless of where they are set.
+// Nested keys are returned with a v.keyDelim (= ".") separator
+func AllKeys() []string { return v.AllKeys() }
+func (v *Viper) AllKeys() []string {
+ m := map[string]bool{}
+ // add all paths, by order of descending priority to ensure correct shadowing
+ m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
+ m = v.flattenAndMergeMap(m, v.override, "")
+ m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
+ m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env))
+ m = v.flattenAndMergeMap(m, v.config, "")
+ m = v.flattenAndMergeMap(m, v.kvstore, "")
+ m = v.flattenAndMergeMap(m, v.defaults, "")
+
+ // convert set of paths to list
+ a := []string{}
+ for x := range m {
+ a = append(a, x)
+ }
+ return a
+}
+
+// flattenAndMergeMap recursively flattens the given map into a map[string]bool
+// of key paths (used as a set, easier to manipulate than a []string):
+// - each path is merged into a single key string, delimited with v.keyDelim (= ".")
+// - if a path is shadowed by an earlier value in the initial shadow map,
+// it is skipped.
+// The resulting set of paths is merged to the given shadow set at the same time.
+func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
+ if shadow != nil && prefix != "" && shadow[prefix] {
+ // prefix is shadowed => nothing more to flatten
+ return shadow
+ }
+ if shadow == nil {
+ shadow = make(map[string]bool)
+ }
+
+ var m2 map[string]interface{}
+ if prefix != "" {
+ prefix += v.keyDelim
+ }
+ for k, val := range m {
+ fullKey := prefix + k
+ switch val.(type) {
+ case map[string]interface{}:
+ m2 = val.(map[string]interface{})
+ case map[interface{}]interface{}:
+ m2 = cast.ToStringMap(val)
+ default:
+ // immediate value
+ shadow[strings.ToLower(fullKey)] = true
+ continue
+ }
+ // recursively merge to shadow map
+ shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
+ }
+ return shadow
+}
+
+// mergeFlatMap merges the given maps, excluding values of the second map
+// shadowed by values from the first map.
+func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
+ // scan keys
+outer:
+ for k := range m {
+ path := strings.Split(k, v.keyDelim)
+ // scan intermediate paths
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if shadow[parentKey] {
+ // path is shadowed, continue
+ continue outer
+ }
+ }
+ // add key
+ shadow[strings.ToLower(k)] = true
+ }
+ return shadow
+}
+
+// AllSettings merges all settings and returns them as a map[string]interface{}.
+func AllSettings() map[string]interface{} { return v.AllSettings() }
+func (v *Viper) AllSettings() map[string]interface{} {
+ return v.allSettings(v.Get)
+}
+
+// AllSettingsWithoutDefault merges all settings, ignoring default values, and returns them as a map[string]interface{}.
+func AllSettingsWithoutDefault() map[string]interface{} { return v.AllSettingsWithoutDefault() }
+func (v *Viper) AllSettingsWithoutDefault() map[string]interface{} {
+ return v.allSettings(v.GetSkipDefault)
+}
+
+func (v *Viper) allSettings(getter func(string) interface{}) map[string]interface{} {
+ m := map[string]interface{}{}
+ // start from the list of keys, and construct the map one value at a time
+ for _, k := range v.AllKeys() {
+ value := getter(k)
+ if value == nil {
+ // should only happen if the getter ignores defaults
+ continue
+ }
+
+ // Build key path by splitting the key by keyDelim and checking that the parent keys
+ // are actually set.
+ // Example use case:
+ // Ensures that, for the yaml conf "foo.bar: baz" and keyDelim ".":
+ // the generated path is []string{"foo.bar", "baz"}, instead of []string{"foo", "bar", "baz"}
+ path := []string{}
+ splitPath := strings.Split(k, v.keyDelim)
+ i := 0
+ for j := range splitPath {
+ if v.IsSet(strings.Join(splitPath[:j+1], v.keyDelim)) {
+ path = append(path, strings.Join(splitPath[i:j+1], v.keyDelim))
+ i = j + 1
+ }
+ }
+
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(m, path[0:len(path)-1])
+ // set innermost value
+ deepestMap[lastKey] = value
+ }
+ return m
+}
+
+// SetFs sets the filesystem to use to read configuration.
+func SetFs(fs afero.Fs) { v.SetFs(fs) }
+func (v *Viper) SetFs(fs afero.Fs) {
+ v.fs = fs
+}
+
+// SetConfigName sets name for the config file.
+// Does not include extension.
+func SetConfigName(in string) { v.SetConfigName(in) }
+func (v *Viper) SetConfigName(in string) {
+ if in != "" {
+ v.configName = in
+ v.configFile = ""
+ }
+}
+
+// SetConfigType sets the type of the configuration returned by the
+// remote source, e.g. "json".
+func SetConfigType(in string) { v.SetConfigType(in) }
+func (v *Viper) SetConfigType(in string) {
+ if in != "" {
+ v.configType = in
+ }
+}
+
+func (v *Viper) getConfigType() string {
+ if v.configType != "" {
+ return v.configType
+ }
+
+ cf, err := v.getConfigFile()
+ if err != nil {
+ return ""
+ }
+
+ ext := filepath.Ext(cf)
+
+ if len(ext) > 1 {
+ return ext[1:]
+ }
+
+ return ""
+}
+
+func (v *Viper) getConfigFile() (string, error) {
+ if v.configFile == "" {
+ cf, err := v.findConfigFile()
+ if err != nil {
+ return "", err
+ }
+ v.configFile = cf
+ }
+ return v.configFile, nil
+}
+
+func (v *Viper) searchInPath(in string) (filename string, err error) {
+ var lastError error
+ jww.DEBUG.Println("Searching for config in ", in)
+ for _, ext := range SupportedExts {
+ jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext))
+ b, err := exists(v.fs, filepath.Join(in, v.configName+"."+ext))
+ if err != nil {
+ lastError = err
+ } else if b {
+ jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext))
+ return filepath.Join(in, v.configName+"."+ext), nil
+ }
+ }
+
+ return "", lastError
+}
+
+// Search all configPaths for any config file.
+// Returns the first path that exists (and is a config file).
+func (v *Viper) findConfigFile() (string, error) {
+ jww.INFO.Println("Searching for config in ", v.configPaths)
+
+ var lastError error
+ for _, cp := range v.configPaths {
+ file, err := v.searchInPath(cp)
+ if file != "" {
+ return file, nil
+ }
+ if err != nil {
+ lastError = err
+ }
+ }
+
+ // If there was no more-specific error, assume this was a not-found error
+ if lastError == nil {
+ lastError = ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
+ }
+
+ return "", lastError
+}
+
+// Debug prints all configuration registries for debugging
+// purposes.
+func Debug() { v.Debug() }
+func (v *Viper) Debug() {
+ fmt.Printf("Aliases:\n%#v\n", v.aliases)
+ fmt.Printf("Override:\n%#v\n", v.override)
+ fmt.Printf("PFlags:\n%#v\n", v.pflags)
+ fmt.Printf("Env:\n%#v\n", v.env)
+ fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore)
+ fmt.Printf("Config:\n%#v\n", v.config)
+ fmt.Printf("Defaults:\n%#v\n", v.defaults)
+}
diff --git a/vendor/github.com/DataDog/viper/watch_config.go b/vendor/github.com/DataDog/viper/watch_config.go
new file mode 100644
index 0000000000..36468336f1
--- /dev/null
+++ b/vendor/github.com/DataDog/viper/watch_config.go
@@ -0,0 +1,81 @@
+// +build !aix
+
+package viper
+
+import (
+ "log"
+ "path/filepath"
+ "sync"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func WatchConfig() { v.WatchConfig() }
+
+func (v *Viper) WatchConfig() {
+ initWG := sync.WaitGroup{}
+ initWG.Add(1)
+ go func() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+ // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
+ filename, err := v.getConfigFile()
+ if err != nil {
+ log.Printf("error: %v\n", err)
+ return
+ }
+
+ configFile := filepath.Clean(filename)
+ configDir, _ := filepath.Split(configFile)
+ realConfigFile, _ := filepath.EvalSymlinks(filename)
+
+ eventsWG := sync.WaitGroup{}
+ eventsWG.Add(1)
+ go func() {
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok { // 'Events' channel is closed
+ eventsWG.Done()
+ return
+ }
+ currentConfigFile, _ := filepath.EvalSymlinks(filename)
+ // we only care about the config file with the following cases:
+ // 1 - if the config file was modified or created
+ // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement)
+ const writeOrCreateMask = fsnotify.Write | fsnotify.Create
+ if (filepath.Clean(event.Name) == configFile &&
+ event.Op&writeOrCreateMask != 0) ||
+ (currentConfigFile != "" && currentConfigFile != realConfigFile) {
+ realConfigFile = currentConfigFile
+ err := v.ReadInConfig()
+ if err != nil {
+ log.Printf("error reading config file: %v\n", err)
+ }
+ if v.onConfigChange != nil {
+ v.onConfigChange(event)
+ }
+ } else if filepath.Clean(event.Name) == configFile &&
+ event.Op&fsnotify.Remove != 0 {
+ eventsWG.Done()
+ return
+ }
+
+ case err, ok := <-watcher.Errors:
+ if ok { // 'Errors' channel is not closed
+ log.Printf("watcher error: %v\n", err)
+ }
+ eventsWG.Done()
+ return
+ }
+ }
+ }()
+ watcher.Add(configDir)
+ initWG.Done() // done initializing the watch in this goroutine, so the parent routine can move on...
+ eventsWG.Wait() // now, wait for event loop to end in this go-routine...
+ }()
+ initWG.Wait() // make sure that the go routine above fully ended before returning
+}
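+
+// Example (illustrative; OnConfigChange is assumed to be the callback setter
+// defined elsewhere in this package, as in upstream viper):
+//
+// OnConfigChange(func(e fsnotify.Event) { log.Println("config file changed:", e.Name) })
+// WatchConfig()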
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
new file mode 100644
index 0000000000..d3992a4f7e
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
@@ -0,0 +1,55 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+const (
+ bmsProjectIDEnv = "BMS_PROJECT_ID"
+ bmsRegionEnv = "BMS_REGION"
+ bmsInstanceIDEnv = "BMS_INSTANCE_ID"
+)
+
+// onBareMetalSolution checks if the code is running on a Google Cloud Bare Metal Solution (BMS) by verifying
+// the presence and non-empty values of BMS_PROJECT_ID, BMS_REGION, and BMS_INSTANCE_ID environment variables.
+// For more information on Google Cloud Bare Metal Solution, see: https://cloud.google.com/bare-metal/docs
+func (d *Detector) onBareMetalSolution() bool {
+ projectID, projectIDExists := d.os.LookupEnv(bmsProjectIDEnv)
+ region, regionExists := d.os.LookupEnv(bmsRegionEnv)
+ instanceID, instanceIDExists := d.os.LookupEnv(bmsInstanceIDEnv)
+ return projectIDExists && regionExists && instanceIDExists && projectID != "" && region != "" && instanceID != ""
+}
+
+// BareMetalSolutionInstanceID returns the instance ID from the BMS_INSTANCE_ID environment variable.
+func (d *Detector) BareMetalSolutionInstanceID() (string, error) {
+ if instanceID, found := d.os.LookupEnv(bmsInstanceIDEnv); found {
+ return instanceID, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionCloudRegion returns the region from the BMS_REGION environment variable.
+func (d *Detector) BareMetalSolutionCloudRegion() (string, error) {
+ if region, found := d.os.LookupEnv(bmsRegionEnv); found {
+ return region, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionProjectID returns the project ID from the BMS_PROJECT_ID environment variable.
+func (d *Detector) BareMetalSolutionProjectID() (string, error) {
+ if project, found := d.os.LookupEnv(bmsProjectIDEnv); found {
+ return project, nil
+ }
+ return "", errEnvVarNotFound
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
index 3726215531..2cc62de097 100644
--- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
@@ -40,11 +40,14 @@ const (
CloudFunctions
AppEngineStandard
AppEngineFlex
+ BareMetalSolution
)
// CloudPlatform returns the platform on which this program is running.
func (d *Detector) CloudPlatform() Platform {
switch {
+ case d.onBareMetalSolution():
+ return BareMetalSolution
case d.onGKE():
return GKE
case d.onCloudFunctions():
diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka
index 186c2eb186..ac2d47a164 100644
--- a/vendor/github.com/IBM/sarama/Dockerfile.kafka
+++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8@sha256:b93deceb59a58588d5b16429fc47f98920f84740a1f2ed6454e33275f0701b59
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9@sha256:f30dbf77b075215f6c827c269c073b5e0973e5cea8dacdf7ecb6a19c868f37f2
USER root
diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml
index e916416d50..55283cfe4f 100644
--- a/vendor/github.com/IBM/sarama/docker-compose.yml
+++ b/vendor/github.com/IBM/sarama/docker-compose.yml
@@ -3,6 +3,7 @@ services:
zookeeper-1:
hostname: 'zookeeper-1'
image: 'docker.io/library/zookeeper:3.6.3'
+ init: true
restart: always
environment:
ZOO_MY_ID: '1'
@@ -15,6 +16,7 @@ services:
zookeeper-2:
hostname: 'zookeeper-2'
image: 'docker.io/library/zookeeper:3.6.3'
+ init: true
restart: always
environment:
ZOO_MY_ID: '2'
@@ -27,6 +29,7 @@ services:
zookeeper-3:
hostname: 'zookeeper-3'
image: 'docker.io/library/zookeeper:3.6.3'
+ init: true
restart: always
environment:
ZOO_MY_ID: '3'
@@ -39,6 +42,7 @@ services:
kafka-1:
hostname: 'kafka-1'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -84,6 +88,7 @@ services:
kafka-2:
hostname: 'kafka-2'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -129,6 +134,7 @@ services:
kafka-3:
hostname: 'kafka-3'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -174,6 +180,7 @@ services:
kafka-4:
hostname: 'kafka-4'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -219,6 +226,7 @@ services:
kafka-5:
hostname: 'kafka-5'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -264,6 +272,7 @@ services:
toxiproxy:
hostname: 'toxiproxy'
image: 'ghcr.io/shopify/toxiproxy:2.4.0'
+ init: true
healthcheck:
test: ['CMD', '/toxiproxy-cli', 'l']
interval: 15s
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
index 639ba76309..e648346be7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.26.1"
+const goModuleVersion = "1.27.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go
index b0133f4c88..19d6107c46 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go
@@ -112,6 +112,8 @@ type MetricData struct {
ResolveEndpointStartTime time.Time
ResolveEndpointEndTime time.Time
EndpointResolutionDuration time.Duration
+ GetIdentityStartTime time.Time
+ GetIdentityEndTime time.Time
InThroughput float64
OutThroughput float64
RetryCount int
@@ -122,6 +124,7 @@ type MetricData struct {
OperationName string
PartitionID string
Region string
+ UserAgent string
RequestContentLength int64
Stream StreamMetrics
Attempts []AttemptMetrics
@@ -144,8 +147,6 @@ type AttemptMetrics struct {
ConnRequestedTime time.Time
ConnObtainedTime time.Time
ConcurrencyAcquireDuration time.Duration
- CredentialFetchStartTime time.Time
- CredentialFetchEndTime time.Time
SignStartTime time.Time
SignEndTime time.Time
SigningDuration time.Duration
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
index febeb0482d..a9db6433de 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
@@ -11,7 +11,6 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
@@ -301,22 +300,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl
return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
}
- mctx := metrics.Context(ctx)
-
- if mctx != nil {
- if attempt, err := mctx.Data().LatestAttempt(); err == nil {
- attempt.CredentialFetchStartTime = sdk.NowTime()
- }
- }
-
credentials, err := s.credentialsProvider.Retrieve(ctx)
-
- if mctx != nil {
- if attempt, err := mctx.Data().LatestAttempt(); err == nil {
- attempt.CredentialFetchEndTime = sdk.NowTime()
- }
- }
-
if err != nil {
return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
}
@@ -337,20 +321,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl
})
}
- if mctx != nil {
- if attempt, err := mctx.Data().LatestAttempt(); err == nil {
- attempt.SignStartTime = sdk.NowTime()
- }
- }
-
err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...)
-
- if mctx != nil {
- if attempt, err := mctx.Data().LatestAttempt(); err == nil {
- attempt.SignEndTime = sdk.NowTime()
- }
- }
-
if err != nil {
return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
index bb61904e1d..55dfd07ba8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
@@ -1,48 +1,41 @@
-// Package v4 implements signing for AWS V4 signer
+// Package v4 implements the AWS signature version 4 algorithm (commonly known
+// as SigV4).
//
-// Provides request signing for request that need to be signed with
-// AWS V4 Signatures.
+// For more information about SigV4, see [Signing AWS API requests] in the IAM
+// user guide.
//
-// # Standalone Signer
+// While this implementation CAN work in an external context, it is developed
+// primarily for SDK use and you may encounter fringe behaviors around header
+// canonicalization.
//
-// Generally using the signer outside of the SDK should not require any additional
+// # Pre-escaping a request URI
//
-// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires
+// AWS v4 signature validation requires that the canonical string's URI path
+// component must be the escaped form of the HTTP request's path.
+//
+// The Go HTTP client will perform escaping automatically on the HTTP request.
+// This may cause signature validation errors because the request differs from
+// the URI path or query from which the signature was generated.
//
-// additional escaping you many need to use the URL.Opaque to define what the raw URI should be sent
-// to the service as.
+// Because of this, we recommend that you explicitly escape the request when
+// using this signer outside of the SDK to prevent possible signature mismatch.
+// This can be done by setting URL.Opaque on the request. The signer will
+// prefer that value, falling back to the return of URL.EscapedPath if unset.
//
-// The signer will first check the URL.Opaque field, and use its value if set.
-// The signer does require the URL.Opaque field to be set in the form of:
+// When setting URL.Opaque you must do so in the form of:
//
// "///"
//
// // e.g.
// "//example.com/some/path"
//
-// The leading "//" and hostname are required or the URL.Opaque escaping will
-// not work correctly.
-//
-// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
-// method and using the returned value.
-//
-// AWS v4 signature validation requires that the canonical string's URI path
-// element must be the URI escaped form of the HTTP request's path.
-// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
-//
-// The Go HTTP client will perform escaping automatically on the request. Some
-// of these escaping may cause signature validation errors because the HTTP
-// request differs from the URI path or query that the signature was generated.
-// https://golang.org/pkg/net/url/#URL.EscapedPath
+// The leading "//" and hostname are required or the escaping will not work
+// correctly.
//
-// Because of this, it is recommended that when using the signer outside of the
-// SDK that explicitly escaping the request prior to being signed is preferable,
-// and will help prevent signature validation errors. This can be done by setting
-// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
-// call URL.EscapedPath() if Opaque is not set.
+// The TestStandaloneSign unit test provides a complete example of using the
+// signer outside of the SDK and pre-escaping the URI path.
//
-// Test `TestStandaloneSign` provides a complete example of using the signer
-// outside of the SDK and pre-escaping the URI path.
+// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
package v4
import (
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index 87ea591aef..20ce6ee871 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,3 +1,23 @@
+# v1.27.16 (2024-05-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.15 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.14 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.13 (2024-05-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.12 (2024-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.27.11 (2024-04-05)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index 46566e90b6..60d884c4f7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.27.11"
+const goModuleVersion = "1.27.16"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index 3b0bad426c..d93b31f47a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,3 +1,23 @@
+# v1.17.16 (2024-05-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.15 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.14 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.13 (2024-05-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.12 (2024-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.17.11 (2024-04-05)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index 4cb3e3039f..91c40c6e70 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.17.11"
+const goModuleVersion = "1.17.16"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index 3807833dd4..15f2dff92d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.16.3 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.16.1 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
index 5642306f87..18c7d54f87 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.16.1"
+const goModuleVersion = "1.16.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
index 72e196dd9e..e5ab27663e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.3.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.3.5 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
index faf71cac3b..67cbc37674 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.3.5"
+const goModuleVersion = "1.3.7"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
index 6f6dafa8d1..5ff8fef936 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v2.6.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v2.6.5 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
index 279816314e..cc9b78076a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
@@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "2.6.5"
+const goModuleVersion = "2.6.7"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
index 0f10e02283..c9e75845eb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.3.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.3.5 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
index 51aa32cf79..3ad7fe2ad3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
@@ -3,4 +3,4 @@
package v4a
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.3.5"
+const goModuleVersion = "1.3.7"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
index 2246bd62ec..bb5f3faa79 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.3.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.3.7 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
index 6785174da5..fffd7ee8fa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
@@ -3,4 +3,4 @@
package checksum
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.3.7"
+const goModuleVersion = "1.3.9"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
index 35c7050dd1..6067045210 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.11.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.11.7 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
index daf77b5c38..24fd480d37 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.7"
+const goModuleVersion = "1.11.9"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
index 150e26f4e1..71d59349f4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.17.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.17.5 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
index a1f30ee06d..74be6efe1d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
@@ -3,4 +3,4 @@
package s3shared
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.17.5"
+const goModuleVersion = "1.17.7"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
index ed52046889..d95930cdb7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
@@ -1,3 +1,23 @@
+# v1.54.3 (2024-05-23)
+
+* **Bug Fix**: Prevent parsing failures for nonstandard `Expires` values in responses. If the SDK cannot parse the value set in the response header for this field it will now be returned as `nil`. A new field, `ExpiresString`, has been added that will retain the unparsed value from the response (regardless of whether it came back in a format recognized by the SDK).
+
+# v1.54.2 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.54.1 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.54.0 (2024-05-14)
+
+* **Feature**: Updated a few x-id in the http uri traits
+
+# v1.53.2 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
# v1.53.1 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
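The v1.54.3 entry above changes how the S3 `Expires` response header is surfaced: an unparseable value no longer fails the request, `Expires` comes back as `nil`, and the raw header is preserved in the new `ExpiresString` field. As a rough illustration of how a consumer of this vendored module might read both fields (a minimal sketch, not part of this diff; the bucket and key names are placeholders, and the assumption is that `ExpiresString` lives on `GetObjectOutput` as the changelog describes):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder bucket and key.
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-object-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	switch {
	case out.Expires != nil:
		// The header was in a format the SDK could parse.
		fmt.Println("expires (parsed):", *out.Expires)
	case out.ExpiresString != nil:
		// Nonstandard header value: parsing failed, but the raw string is kept.
		fmt.Println("expires (raw):", *out.ExpiresString)
	default:
		fmt.Println("object has no Expires header")
	}
}
```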
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go
index c71060e082..c10e498703 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go
@@ -18,22 +18,27 @@ import (
// by any previously uploaded parts will be freed. However, if any part uploads are
// currently in progress, those part uploads might or might not succeed. As a
// result, it might be necessary to abort a given multipart upload multiple times
-// in order to completely free all storage consumed by all parts. To verify that
-// all parts have been removed and prevent getting charged for the part storage,
-// you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// API operation and ensure that the parts list is empty. Directory buckets - For
-// directory buckets, you must make requests for this API operation to the Zonal
-// endpoint. These endpoints support virtual-hosted-style requests in the format
+// in order to completely free all storage consumed by all parts.
+//
+// To verify that all parts have been removed and prevent getting charged for the
+// part storage, you should call the [ListParts]API operation and ensure that the parts list
+// is empty.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions
+// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User
+// Guide.
+//
+// Permissions
+//
// - General purpose bucket permissions - For information about permissions
-// required to use the multipart upload, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide.
+// required to use the multipart upload, see [Multipart Upload and Permissions]in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -41,17 +46,31 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession]CreateSession .
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following operations are related to AbortMultipartUpload :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [ListParts]
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to AbortMultipartUpload :
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+// [ListMultipartUploads]
+//
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) {
if params == nil {
params = &AbortMultipartUploadInput{}
@@ -69,31 +88,39 @@ func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipar
type AbortMultipartUploadInput struct {
- // The bucket name to which the upload was taking place. Directory buckets - When
- // you use this operation with a directory bucket, you must use
- // virtual-hosted-style requests in the format
+ // The bucket name to which the upload was taking place.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -117,10 +144,12 @@ type AbortMultipartUploadInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
noSmithyDocumentSerde
@@ -135,7 +164,9 @@ func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) {
type AbortMultipartUploadOutput struct {
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
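The reflowed doc comment above keeps the substance of the `AbortMultipartUpload` guidance: aborting frees storage for already-uploaded parts, part uploads still in flight may survive a single abort, and `ListParts` can be used to confirm nothing remains. A minimal sketch of that flow against this vendored client (not part of the diff; the bucket, key, and upload ID are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder identifiers for an in-progress multipart upload.
	bucket := aws.String("example-bucket")
	key := aws.String("example-object-key")
	uploadID := aws.String("example-upload-id")

	if _, err := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: uploadID,
	}); err != nil {
		log.Fatal(err)
	}

	// Confirm nothing is left behind; part uploads that were still in flight
	// can require aborting again. A NoSuchUpload error here also means the
	// upload (and its parts) no longer exists.
	parts, err := client.ListParts(ctx, &s3.ListPartsInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: uploadID,
	})
	if err != nil {
		log.Printf("ListParts: %v", err)
		return
	}
	fmt.Printf("parts still stored: %d\n", len(parts.Parts))
}
```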
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go
index 8f89d780ee..1bdf3c6ab6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go
@@ -13,51 +13,59 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Completes a multipart upload by assembling previously uploaded parts. You first
-// initiate the multipart upload and then upload all parts using the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
-// operation. After successfully uploading all relevant parts of an upload, you
-// call this CompleteMultipartUpload operation to complete the upload. Upon
-// receiving this request, Amazon S3 concatenates all the parts in ascending order
-// by part number to create a new object. In the CompleteMultipartUpload request,
-// you must provide the parts list and ensure that the parts list is complete. The
-// CompleteMultipartUpload API operation concatenates the parts that you provide in
-// the list. For each part in the list, you must provide the PartNumber value and
-// the ETag value that are returned after that part was uploaded. The processing
-// of a CompleteMultipartUpload request could take several minutes to finalize.
-// After Amazon S3 begins processing the request, it sends an HTTP response header
-// that specifies a 200 OK response. While processing is in progress, Amazon S3
-// periodically sends white space characters to keep the connection from timing
-// out. A request could fail after the initial 200 OK response has been sent. This
-// means that a 200 OK response can contain either a success or an error. The
-// error response might be embedded in the 200 OK response. If you call this API
-// operation directly, make sure to design your application to parse the contents
-// of the response and handle it appropriately. If you use Amazon Web Services
-// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply
-// error handling per your configuration settings (including automatically retrying
-// the request as appropriate). If the condition persists, the SDKs throw an
-// exception (or, for the SDKs that don't use exceptions, they return an error).
+// Completes a multipart upload by assembling previously uploaded parts.
+//
+// You first initiate the multipart upload and then upload all parts using the [UploadPart]
+// operation or the [UploadPartCopy]operation. After successfully uploading all relevant parts of
+// an upload, you call this CompleteMultipartUpload operation to complete the
+// upload. Upon receiving this request, Amazon S3 concatenates all the parts in
+// ascending order by part number to create a new object. In the
+// CompleteMultipartUpload request, you must provide the parts list and ensure that
+// the parts list is complete. The CompleteMultipartUpload API operation
+// concatenates the parts that you provide in the list. For each part in the list,
+// you must provide the PartNumber value and the ETag value that are returned
+// after that part was uploaded.
+//
+// The processing of a CompleteMultipartUpload request could take several minutes
+// to finalize. After Amazon S3 begins processing the request, it sends an HTTP
+// response header that specifies a 200 OK response. While processing is in
+// progress, Amazon S3 periodically sends white space characters to keep the
+// connection from timing out. A request could fail after the initial 200 OK
+// response has been sent. This means that a 200 OK response can contain either a
+// success or an error. The error response might be embedded in the 200 OK
+// response. If you call this API operation directly, make sure to design your
+// application to parse the contents of the response and handle it appropriately.
+// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect
+// the embedded error and apply error handling per your configuration settings
+// (including automatically retrying the request as appropriate). If the condition
+// persists, the SDKs throw an exception (or, for the SDKs that don't use
+// exceptions, they return an error).
+//
// Note that if CompleteMultipartUpload fails, applications should be prepared to
// retry any failed requests (including 500 error responses). For more information,
-// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html)
-// . You can't use Content-Type: application/x-www-form-urlencoded for the
+// see [Amazon S3 Error Best Practices].
+//
+// You can't use Content-Type: application/x-www-form-urlencoded for the
// CompleteMultipartUpload requests. Also, if you don't provide a Content-Type
-// header, CompleteMultipartUpload can still return a 200 OK response. For more
-// information about multipart uploads, see Uploading Objects Using Multipart
-// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
-// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must
-// make requests for this API operation to the Zonal endpoint. These endpoints
-// support virtual-hosted-style requests in the format
+// header, CompleteMultipartUpload can still return a 200 OK response.
+//
+// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions
+// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User
+// Guide.
+//
+// Permissions
+//
// - General purpose bucket permissions - For information about permissions
-// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide.
+// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -65,36 +73,65 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession]CreateSession .
//
// Special errors
+//
// - Error Code: EntityTooSmall
+//
// - Description: Your proposed upload is smaller than the minimum allowed
// object size. Each part must be at least 5 MB in size, except the last part.
+//
// - HTTP Status Code: 400 Bad Request
+//
// - Error Code: InvalidPart
+//
// - Description: One or more of the specified parts could not be found. The
// part might not have been uploaded, or the specified ETag might not have matched
// the uploaded part's ETag.
+//
// - HTTP Status Code: 400 Bad Request
+//
// - Error Code: InvalidPartOrder
+//
// - Description: The list of parts was not in ascending order. The parts list
// must be specified in order by part number.
+//
// - HTTP Status Code: 400 Bad Request
+//
// - Error Code: NoSuchUpload
+//
// - Description: The specified multipart upload does not exist. The upload ID
// might be invalid, or the multipart upload might have been aborted or completed.
+//
// - HTTP Status Code: 404 Not Found
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to CompleteMultipartUpload :
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following operations are related to CompleteMultipartUpload :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [AbortMultipartUpload]
+//
+// [ListParts]
+//
+// [ListMultipartUploads]
+//
+// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+// [Amazon S3 Error Best Practices]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) {
if params == nil {
params = &CompleteMultipartUploadInput{}
@@ -112,31 +149,39 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMu
type CompleteMultipartUploadInput struct {
- // Name of the bucket to which the multipart upload was initiated. Directory
- // buckets - When you use this operation with a directory bucket, you must use
- // virtual-hosted-style requests in the format
+ // Name of the bucket to which the multipart upload was initiated.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -153,30 +198,34 @@ type CompleteMultipartUploadInput struct {
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32C *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA1 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA256 *string
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -191,32 +240,40 @@ type CompleteMultipartUploadInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// The server-side encryption (SSE) algorithm used to encrypt the object. This
// parameter is required only when the object was created using a checksum
// algorithm or if your bucket policy requires the use of SSE-C. For more
- // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key
SSECustomerAlgorithm *string
// The server-side encryption (SSE) customer managed key. This parameter is needed
// only when the object was created using a checksum algorithm. For more
- // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerKey *string
// The MD5 server-side encryption (SSE) customer managed key. This parameter is
// needed only when the object was created using a checksum algorithm. For more
- // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerKeyMD5 *string
noSmithyDocumentSerde
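The `ChecksumCRC32`, `ChecksumCRC32C`, `ChecksumSHA1`, and `ChecksumSHA256` fields documented in the hunks above all carry base64-encoded checksum values; in normal use these are the values returned by the corresponding upload operations rather than something callers compute by hand. For reference only, my reading of the "base64-encoded, 32-bit CRC32 checksum" wording is base64 over the four big-endian bytes of the CRC32 value, roughly like the sketch below (an illustration of the encoding, not code from this diff):

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// crc32Base64 returns a base64-encoded, 32-bit CRC32 (IEEE) checksum of data:
// the checksum is serialized as 4 big-endian bytes and then base64-encoded.
func crc32Base64(data []byte) string {
	sum := crc32.ChecksumIEEE(data)
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], sum)
	return base64.StdEncoding.EncodeToString(buf[:])
}

func main() {
	fmt.Println(crc32Base64([]byte("hello world")))
}
```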
@@ -231,13 +288,15 @@ func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters
type CompleteMultipartUploadOutput struct {
// The name of the bucket that contains the newly created object. Does not return
- // the access point ARN or access point alias if used. Access points are not
- // supported by directory buckets.
+ // the access point ARN or access point alias if used.
+ //
+ // Access points are not supported by directory buckets.
Bucket *string
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
- // is not supported for directory buckets.
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
@@ -245,8 +304,10 @@ type CompleteMultipartUploadOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
@@ -254,8 +315,10 @@ type CompleteMultipartUploadOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
@@ -263,8 +326,10 @@ type CompleteMultipartUploadOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
@@ -272,8 +337,10 @@ type CompleteMultipartUploadOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA256 *string
// Entity tag that identifies the newly created object's data. Objects with
@@ -282,12 +349,14 @@ type CompleteMultipartUploadOutput struct {
// data. If the entity tag is not an MD5 digest of the object data, it will contain
// one or more nonhexadecimal characters and/or will consist of less than 32 or
// more than 32 hexadecimal digits. For more information about how the entity tag
- // is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // is calculated, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ETag *string
// If the object expiration is configured, this will contain the expiration date (
// expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded.
+ //
// This functionality is not supported for directory buckets.
Expiration *string
@@ -298,21 +367,28 @@ type CompleteMultipartUploadOutput struct {
Location *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If present, indicates the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. This functionality
- // is not supported for directory buckets.
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing this object in Amazon S3
- // (for example, AES256 , aws:kms ). For directory buckets, only server-side
- // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+ // (for example, AES256 , aws:kms ).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// Version ID of the newly created object, in case the bucket has versioning
- // turned on. This functionality is not supported for directory buckets.
+ // turned on.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
// Metadata pertaining to the operation's result.
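The reworked doc comment for `CompleteMultipartUpload` above preserves the key requirements: supply the complete parts list in ascending part-number order, provide the `ETag` returned for each `UploadPart`, and be prepared to retry failures, including errors embedded in a 200 response. A minimal sketch of that call against this vendored client (not part of the diff; bucket, key, upload ID, and ETag values are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Parts must be listed in ascending order by part number, with the ETags
	// returned by the earlier UploadPart calls. Values here are placeholders.
	completed := &types.CompletedMultipartUpload{
		Parts: []types.CompletedPart{
			{PartNumber: aws.Int32(1), ETag: aws.String("\"etag-of-part-1\"")},
			{PartNumber: aws.Int32(2), ETag: aws.String("\"etag-of-part-2\"")},
		},
	}

	out, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:          aws.String("example-bucket"),
		Key:             aws.String("example-object-key"),
		UploadId:        aws.String("example-upload-id"),
		MultipartUpload: completed,
	})
	if err != nil {
		// Per the doc comment, a 200 response can still embed an error; the SDK
		// reports that as err here, and failed completions should be retried.
		log.Fatal(err)
	}
	fmt.Println("assembled object ETag:", aws.ToString(out.ETag))
}
```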
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go
index c7990bab1d..bae0b7cb49 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go
@@ -15,102 +15,134 @@ import (
"time"
)
-// Creates a copy of an object that is already stored in Amazon S3. You can store
-// individual objects of up to 5 TB in Amazon S3. You create a copy of your object
-// up to 5 GB in size in a single atomic action using this API. However, to copy an
-// object greater than 5 GB, you must use the multipart upload Upload Part - Copy
-// (UploadPartCopy) API. For more information, see Copy Object Using the REST
-// Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html)
-// . You can copy individual objects between general purpose buckets, between
+// Creates a copy of an object that is already stored in Amazon S3.
+//
+// You can store individual objects of up to 5 TB in Amazon S3. You create a copy
+// of your object up to 5 GB in size in a single atomic action using this API.
+// However, to copy an object greater than 5 GB, you must use the multipart upload
+// Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API].
+//
+// You can copy individual objects between general purpose buckets, between
// directory buckets, and between general purpose buckets and directory buckets.
+//
// Directory buckets - For directory buckets, you must make requests for this API
// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Both the Region that you want to copy the object
-// from and the Region that you want to copy the object to must be enabled for your
-// account. For more information about how to enable a Region for your account, see
-// Enable or disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone)
-// in the Amazon Web Services Account Management Guide. Amazon S3 transfer
-// acceleration does not support cross-Region copies. If you request a cross-Region
-// copy using a transfer acceleration endpoint, you get a 400 Bad Request error.
-// For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
-// . Authentication and authorization All CopyObject requests must be
-// authenticated and signed by using IAM credentials (access key ID and secret
-// access key for the IAM identities). All headers with the x-amz- prefix,
-// including x-amz-copy-source , must be signed. For more information, see REST
-// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
-// . Directory buckets - You must use the IAM credentials to authenticate and
+// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User
+// Guide.
+//
+// Both the Region that you want to copy the object from and the Region that you
+// want to copy the object to must be enabled for your account. For more
+// information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts]in the Amazon
+// Web Services Account Management Guide.
+//
+// Amazon S3 transfer acceleration does not support cross-Region copies. If you
+// request a cross-Region copy using a transfer acceleration endpoint, you get a
+// 400 Bad Request error. For more information, see [Transfer Acceleration].
+//
+// Authentication and authorization All CopyObject requests must be authenticated
+// and signed by using IAM credentials (access key ID and secret access key for the
+// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source
+// , must be signed. For more information, see [REST Authentication].
+//
+// Directory buckets - You must use the IAM credentials to authenticate and
// authorize your access to the CopyObject API operation, instead of using the
-// temporary security credentials through the CreateSession API operation. Amazon
-// Web Services CLI or SDKs handles authentication and authorization on your
-// behalf. Permissions You must have read access to the source object and write
-// access to the destination bucket.
+// temporary security credentials through the CreateSession API operation.
+//
+// Amazon Web Services CLI or SDKs handles authentication and authorization on
+// your behalf.
+//
+// Permissions You must have read access to the source object and write access to
+// the destination bucket.
+//
// - General purpose bucket permissions - You must have permissions in an IAM
// policy based on the source and destination bucket types in a CopyObject
// operation.
+//
// - If the source object is in a general purpose bucket, you must have
// s3:GetObject permission to read the source object that is being copied.
+//
// - If the destination bucket is a general purpose bucket, you must have
// s3:PutObject permission to write the object copy to the destination bucket.
+//
// - Directory bucket permissions - You must have permissions in a bucket policy
// or an IAM identity-based policy based on the source and destination bucket types
// in a CopyObject operation.
+//
// - If the source object that you want to copy is in a directory bucket, you
// must have the s3express:CreateSession permission in the Action element of a
// policy to read the object. By default, the session is in the ReadWrite mode.
// If you want to restrict the access, you can explicitly set the
// s3express:SessionMode condition key to ReadOnly on the copy source bucket.
+//
// - If the copy destination is a directory bucket, you must have the
// s3express:CreateSession permission in the Action element of a policy to write
// the object to the destination. The s3express:SessionMode condition key can't
-// be set to ReadOnly on the copy destination bucket. For example policies, see
-// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
-// and Amazon Web Services Identity and Access Management (IAM) identity-based
-// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
-// in the Amazon S3 User Guide.
+// be set to ReadOnly on the copy destination bucket.
+//
+// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide.
//
// Response and special errors When the request is an HTTP 1.1 request, the
// response is chunk encoded. When the request is not an HTTP 1.1 request, the
// response would not contain the Content-Length . You always need to read the
// entire response body to check if the copy succeeds. to keep the connection alive
// while we copy the data.
+//
// - If the copy is successful, you receive a response with information about
// the copied object.
+//
// - A copy request might return an error when Amazon S3 receives the copy
// request or while Amazon S3 is copying the files. A 200 OK response can contain
// either a success or an error.
+//
// - If the error occurs before the copy action starts, you receive a standard
// Amazon S3 error.
+//
// - If the error occurs during the copy operation, the error response is
// embedded in the 200 OK response. For example, in a cross-region copy, you may
-// encounter throttling and receive a 200 OK response. For more information, see
-// Resolve the Error 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror)
+// encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3]
// . The 200 OK status code means the copy was accepted, but it doesn't mean the
// copy is complete. Another example is when you disconnect from Amazon S3 before
// the copy is complete, Amazon S3 might cancel the copy and you may receive a
// 200 OK response. You must stay connected to Amazon S3 until the entire
-// response is successfully received and processed. If you call this API operation
-// directly, make sure to design your application to parse the content of the
-// response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs
-// handle this condition. The SDKs detect the embedded error and apply error
-// handling per your configuration settings (including automatically retrying the
-// request as appropriate). If the condition persists, the SDKs throw an exception
-// (or, for the SDKs that don't use exceptions, they return an error).
+// response is successfully received and processed.
+//
+// If you call this API operation directly, make sure to design your application
+//
+// to parse the content of the response and handle it appropriately. If you use
+// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the
+// embedded error and apply error handling per your configuration settings
+// (including automatically retrying the request as appropriate). If the condition
+// persists, the SDKs throw an exception (or, for the SDKs that don't use
+// exceptions, they return an error).
//
// Charge The copy request charge is based on the storage class and Region that
// you specify for the destination object. The request can also result in a data
// retrieval charge for the source if the source storage class bills for data
// retrieval. If the copy source is in a different region, the data transfer is
-// billed to the copy source account. For pricing information, see Amazon S3
-// pricing (http://aws.amazon.com/s3/pricing/) . HTTP Host header syntax Directory
-// buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to CopyObject :
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+// billed to the copy source account. For pricing information, see [Amazon S3 pricing].
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following operations are related to CopyObject :
+//
+// [PutObject]
+//
+// [GetObject]
+//
+// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+// [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror
+// [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
+// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone
+// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/
func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) {
if params == nil {
params = &CopyObjectInput{}
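The hunk above reflows the `CopyObject` operation doc without changing its substance: single-call copies of objects up to 5 GB, signed requests that need read access to the source and write access to the destination, and a 200 response that can still carry an embedded error. As a rough illustration of a basic call with this vendored client, using the `source-bucket/source-key` form of `CopySource` described in the field documentation that follows (a sketch, not part of the diff; all names are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// CopySource is "<source-bucket>/<source-key>"; keys containing special
	// characters must be URL-encoded. All names here are placeholders.
	out, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"),
		Key:        aws.String("reports/january.pdf"),
		CopySource: aws.String("source-bucket/reports/january.pdf"),
	})
	if err != nil {
		// Per the doc comment, errors can also be embedded in a 200 response;
		// the SDK surfaces those here as err.
		log.Fatal(err)
	}
	if out.CopyObjectResult != nil && out.CopyObjectResult.ETag != nil {
		fmt.Println("copied object ETag:", *out.CopyObjectResult.ETag)
	}
}
```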
@@ -128,31 +160,39 @@ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns
type CopyObjectInput struct {
- // The name of the destination bucket. Directory buckets - When you use this
- // operation with a directory bucket, you must use virtual-hosted-style requests in
- // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style
- // requests are not supported. Directory bucket names must be unique in the chosen
- // Availability Zone. Bucket names must follow the format
- // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // The name of the destination bucket.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
+ // supported. Directory bucket names must be unique in the chosen Availability
+ // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
+ // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
+ // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -160,10 +200,11 @@ type CopyObjectInput struct {
// Specifies the source object for the copy operation. The source object can be up
// to 5 GB. If the source object is an object that was uploaded by using a
// multipart upload, the object copy will be a single part object after the source
- // object is copied to the destination bucket. You specify the value of the copy
- // source in one of two formats, depending on whether you want to access the source
- // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html)
- // :
+ // object is copied to the destination bucket.
+ //
+ // You specify the value of the copy source in one of two formats, depending on
+ // whether you want to access the source object through an [access point]:
+ //
// - For objects not accessed through an access point, specify the name of the
// source bucket and the key of the source object, separated by a slash (/). For
// example, to copy the object reports/january.pdf from the general purpose
@@ -172,6 +213,7 @@ type CopyObjectInput struct {
// bucket awsexamplebucket--use1-az5--x-s3 , use
// awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be
// URL-encoded.
+ //
// - For objects accessed through access points, specify the Amazon Resource
// Name (ARN) of the object as accessed through the access point, in the format
// arn:aws:s3:::accesspoint//object/ . For example, to copy the object
@@ -179,15 +221,20 @@ type CopyObjectInput struct {
// 123456789012 in Region us-west-2 , use the URL encoding of
// arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
// . The value must be URL encoded.
+ //
// - Amazon S3 supports copy operations using Access points only when the source
// and destination buckets are in the same Amazon Web Services Region.
- // - Access points are not supported by directory buckets. Alternatively, for
- // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as
- // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example,
- // to copy the object reports/january.pdf through outpost my-outpost owned by
- // account 123456789012 in Region us-west-2 , use the URL encoding of
+ //
+ // - Access points are not supported by directory buckets.
+ //
+ // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the
+ // ARN of the object as accessed in the format
+ // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object
+ // reports/january.pdf through outpost my-outpost owned by account 123456789012
+ // in Region us-west-2 , use the URL encoding of
// arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
// . The value must be URL-encoded.
+ //
// If your source bucket versioning is enabled, the x-amz-copy-source header by
// default identifies the current version of an object to copy. If the current
// version is a delete marker, Amazon S3 behaves as if the object was deleted. To
@@ -195,14 +242,21 @@ type CopyObjectInput struct {
// append ?versionId= to the value (for example,
// awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
// ). If you don't specify a version ID, Amazon S3 copies the latest version of the
- // source object. If you enable versioning on the destination bucket, Amazon S3
- // generates a unique version ID for the copied object. This version ID is
- // different from the version ID of the source object. Amazon S3 returns the
- // version ID of the copied object in the x-amz-version-id response header in the
- // response. If you do not enable versioning or suspend it on the destination
- // bucket, the version ID that Amazon S3 generates in the x-amz-version-id
- // response header is always null. Directory buckets - S3 Versioning isn't enabled
- // and supported for directory buckets.
+ // source object.
+ //
+ // If you enable versioning on the destination bucket, Amazon S3 generates a
+ // unique version ID for the copied object. This version ID is different from the
+ // version ID of the source object. Amazon S3 returns the version ID of the copied
+ // object in the x-amz-version-id response header in the response.
+ //
+ // If you do not enable versioning or suspend it on the destination bucket, the
+ // version ID that Amazon S3 generates in the x-amz-version-id response header is
+ // always null.
+ //
+ // Directory buckets - S3 Versioning isn't enabled or supported for directory
+ // buckets.
+ //
+ // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
//
// This member is required.
CopySource *string
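// Illustrative sketch (editorial addition, not part of this patch): how a caller
// might populate CopySource for a versioned source object with this SDK. The
// bucket and key names are hypothetical; the version ID is the example value
// from the documentation above.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// copySpecificVersion copies one version of reports/january.pdf into a
// destination bucket. The ?versionId= suffix selects a version other than the
// latest; keys containing special characters must be URL-encoded first.
func copySpecificVersion(ctx context.Context, client *s3.Client) error {
	source := "example-source-bucket/reports/january.pdf" +
		"?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("example-destination-bucket"),
		Key:        aws.String("reports/january.pdf"),
		CopySource: aws.String(source),
	})
	return err
}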
@@ -212,51 +266,67 @@ type CopyObjectInput struct {
// This member is required.
Key *string
- // The canned access control list (ACL) to apply to the object. When you copy an
- // object, the ACL metadata is not preserved and is set to private by default.
- // Only the owner has full access control. To override the default ACL setting,
- // specify a new ACL when you generate a copy request. For more information, see
- // Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
- // . If the destination bucket that you're copying objects to uses the bucket owner
+ // The canned access control list (ACL) to apply to the object.
+ //
+ // When you copy an object, the ACL metadata is not preserved and is set to private
+ // by default. Only the owner has full access control. To override the default ACL
+ // setting, specify a new ACL when you generate a copy request. For more
+ // information, see [Using ACLs].
+ //
+ // If the destination bucket that you're copying objects to uses the bucket owner
// enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect
// permissions. Buckets that use this setting only accept PUT requests that don't
// specify an ACL or PUT requests that specify bucket owner full control ACLs,
// such as the bucket-owner-full-control canned ACL or an equivalent form of this
- // ACL expressed in the XML format. For more information, see Controlling
- // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
- // in the Amazon S3 User Guide.
+ // ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs] in the Amazon S3
+ // User Guide.
+ //
// - If your destination bucket uses the bucket owner enforced setting for
// Object Ownership, all objects written to the bucket by any account will be owned
// by the bucket owner.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+ // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
ACL types.ObjectCannedACL
// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
// with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object.
+ //
// Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object
// encryption with SSE-KMS. Specifying this header with a COPY action doesn’t
- // affect bucket-level settings for S3 Bucket Key. For more information, see
- // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
- // in the Amazon S3 User Guide. This functionality is not supported when the
- // destination bucket is a directory bucket.
+ // affect bucket-level settings for S3 Bucket Key.
+ //
+ // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ //
+ // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
BucketKeyEnabled *bool
// Specifies the caching behavior along the request/reply chain.
CacheControl *string
// Indicates the algorithm that you want Amazon S3 to use to create the checksum
- // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. When you copy an object, if the source object has a
- // checksum, that checksum value will be copied to the new object by default. If
- // the CopyObject request does not include this x-amz-checksum-algorithm header,
- // the checksum algorithm will be copied from the source object to the destination
- // object (if it's present on the source object). You can optionally specify a
- // different checksum algorithm to use with the x-amz-checksum-algorithm header.
- // Unrecognized or unsupported values will respond with the HTTP status code 400
- // Bad Request . For directory buckets, when you use Amazon Web Services SDKs,
- // CRC32 is the default checksum algorithm that's used for performance.
+ // for the object. For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // When you copy an object, if the source object has a checksum, that checksum
+ // value will be copied to the new object by default. If the CopyObject request
+ // does not include this x-amz-checksum-algorithm header, the checksum algorithm
+ // will be copied from the source object to the destination object (if it's present
+ // on the source object). You can optionally specify a different checksum algorithm
+ // to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported
+ // values will respond with the HTTP status code 400 Bad Request .
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
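// Illustrative sketch (editorial addition, not part of this patch): overriding
// the checksum algorithm on copy, as described above. Bucket and key names are
// hypothetical.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyWithSHA256Checksum asks Amazon S3 to compute a SHA-256 checksum for the
// object copy instead of carrying over the source object's checksum algorithm.
func copyWithSHA256Checksum(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:            aws.String("example-destination-bucket"),
		Key:               aws.String("reports/january.pdf"),
		CopySource:        aws.String("example-source-bucket/reports/january.pdf"),
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	return err
}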
// Specifies presentational information for the object. Indicates whether an
@@ -266,8 +336,10 @@ type CopyObjectInput struct {
// Specifies what content encodings have been applied to the object and thus what
// decoding mechanisms must be applied to obtain the media-type referenced by the
- // Content-Type header field. For directory buckets, only the aws-chunked value is
- // supported in this header field.
+ // Content-Type header field.
+ //
+ // For directory buckets, only the aws-chunked value is supported in this header
+ // field.
ContentEncoding *string
// The language the content is in.
@@ -276,62 +348,85 @@ type CopyObjectInput struct {
// A standard MIME type that describes the format of the object data.
ContentType *string
- // Copies the object if its entity tag (ETag) matches the specified tag. If both
- // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ //
+ // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
// headers are present in the request and evaluate as follows, Amazon S3 returns
// 200 OK and copies the data:
+ //
// - x-amz-copy-source-if-match condition evaluates to true
+ //
// - x-amz-copy-source-if-unmodified-since condition evaluates to false
CopySourceIfMatch *string
- // Copies the object if it has been modified since the specified time. If both the
- // x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers
- // are present in the request and evaluate as follows, Amazon S3 returns the 412
- // Precondition Failed response code:
+ // Copies the object if it has been modified since the specified time.
+ //
+ // If both the x-amz-copy-source-if-none-match and
+ // x-amz-copy-source-if-modified-since headers are present in the request and
+ // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response
+ // code:
+ //
// - x-amz-copy-source-if-none-match condition evaluates to false
+ //
// - x-amz-copy-source-if-modified-since condition evaluates to true
CopySourceIfModifiedSince *time.Time
- // Copies the object if its entity tag (ETag) is different than the specified
- // ETag. If both the x-amz-copy-source-if-none-match and
+ // Copies the object if its entity tag (ETag) is different than the specified ETag.
+ //
+ // If both the x-amz-copy-source-if-none-match and
// x-amz-copy-source-if-modified-since headers are present in the request and
// evaluate as follows, Amazon S3 returns the 412 Precondition Failed response
// code:
+ //
// - x-amz-copy-source-if-none-match condition evaluates to false
+ //
// - x-amz-copy-source-if-modified-since condition evaluates to true
CopySourceIfNoneMatch *string
- // Copies the object if it hasn't been modified since the specified time. If both
- // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // Copies the object if it hasn't been modified since the specified time.
+ //
+ // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
// headers are present in the request and evaluate as follows, Amazon S3 returns
// 200 OK and copies the data:
+ //
// - x-amz-copy-source-if-match condition evaluates to true
+ //
// - x-amz-copy-source-if-unmodified-since condition evaluates to false
CopySourceIfUnmodifiedSince *time.Time
// Specifies the algorithm to use when decrypting the source object (for example,
- // AES256 ). If the source object for the copy is stored in Amazon S3 using SSE-C,
- // you must provide the necessary encryption information in your request so that
- // Amazon S3 can decrypt the object for copying. This functionality is not
- // supported when the source object is in a directory bucket.
+ // AES256 ).
+ //
+ // If the source object for the copy is stored in Amazon S3 using SSE-C, you must
+ // provide the necessary encryption information in your request so that Amazon S3
+ // can decrypt the object for copying.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
// the source object. The encryption key provided in this header must be the same
- // one that was used when the source object was created. If the source object for
- // the copy is stored in Amazon S3 using SSE-C, you must provide the necessary
- // encryption information in your request so that Amazon S3 can decrypt the object
- // for copying. This functionality is not supported when the source object is in a
- // directory bucket.
+ // one that was used when the source object was created.
+ //
+ // If the source object for the copy is stored in Amazon S3 using SSE-C, you must
+ // provide the necessary encryption information in your request so that Amazon S3
+ // can decrypt the object for copying.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error. If the source object for the copy
- // is stored in Amazon S3 using SSE-C, you must provide the necessary encryption
- // information in your request so that Amazon S3 can decrypt the object for
- // copying. This functionality is not supported when the source object is in a
- // directory bucket.
+ // encryption key was transmitted without error.
+ //
+ // If the source object for the copy is stored in Amazon S3 using SSE-C, you must
+ // provide the necessary encryption information in your request so that Amazon S3
+ // can decrypt the object for copying.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerKeyMD5 *string
// The account ID of the expected destination bucket owner. If the account ID that
@@ -348,22 +443,30 @@ type CopyObjectInput struct {
Expires *time.Time
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string
// Allows grantee to read the object data and its metadata.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string
// Allows grantee to read the object ACL.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string
// Allows grantee to write the ACL for the applicable object.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string
@@ -373,26 +476,32 @@ type CopyObjectInput struct {
// Specifies whether the metadata is copied from the source object or replaced
// with metadata that's provided in the request. When copying an object, you can
// preserve all metadata (the default) or specify new metadata. If this header
- // isn’t specified, COPY is the default behavior. General purpose bucket - For
- // general purpose buckets, when you grant permissions, you can use the
- // s3:x-amz-metadata-directive condition key to enforce certain metadata behavior
- // when objects are uploaded. For more information, see Amazon S3 condition key
- // examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html)
- // in the Amazon S3 User Guide. x-amz-website-redirect-location is unique to each
- // object and is not copied when using the x-amz-metadata-directive header. To
- // copy the value, you must specify x-amz-website-redirect-location in the request
- // header.
+ // isn’t specified, COPY is the default behavior.
+ //
+ // General purpose bucket - For general purpose buckets, when you grant
+ // permissions, you can use the s3:x-amz-metadata-directive condition key to
+ // enforce certain metadata behavior when objects are uploaded. For more
+ // information, see [Amazon S3 condition key examples] in the Amazon S3 User Guide.
+ //
+ // x-amz-website-redirect-location is unique to each object and is not copied when
+ // using the x-amz-metadata-directive header. To copy the value, you must specify
+ // x-amz-website-redirect-location in the request header.
+ //
+ // [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html
MetadataDirective types.MetadataDirective
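// Illustrative sketch (editorial addition, not part of this patch): replacing
// the object metadata during a copy via the REPLACE metadata directive described
// above. The metadata keys and bucket names are hypothetical.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyReplacingMetadata discards the source object's metadata and writes the
// metadata supplied in the request onto the destination copy.
func copyReplacingMetadata(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:            aws.String("example-destination-bucket"),
		Key:               aws.String("reports/january.pdf"),
		CopySource:        aws.String("example-source-bucket/reports/january.pdf"),
		MetadataDirective: types.MetadataDirectiveReplace,
		Metadata:          map[string]string{"project": "quarterly-reports"},
		ContentType:       aws.String("application/pdf"),
	})
	return err
}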
- // Specifies whether you want to apply a legal hold to the object copy. This
- // functionality is not supported for directory buckets.
+ // Specifies whether you want to apply a legal hold to the object copy.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
- // The Object Lock mode that you want to apply to the object copy. This
- // functionality is not supported for directory buckets.
+ // The Object Lock mode that you want to apply to the object copy.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode types.ObjectLockMode
// The date and time when you want the Object Lock of the object copy to expire.
+ //
// This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time
@@ -400,19 +509,23 @@ type CopyObjectInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Specifies the algorithm to use when encrypting the object (for example, AES256
- // ). When you perform a CopyObject operation, if you want to use a different type
- // of encryption setting for the target object, you can specify appropriate
+ // Specifies the algorithm to use when encrypting the object (for example, AES256 ).
+ //
+ // When you perform a CopyObject operation, if you want to use a different type of
+ // encryption setting for the target object, you can specify appropriate
// encryption-related headers to encrypt the target object with an Amazon S3
// managed key, a KMS key, or a customer-provided key. If the encryption setting in
// your request is different from the default encryption configuration of the
// destination bucket, the encryption setting in your request takes precedence.
+ //
// This functionality is not supported when the destination bucket is a directory
// bucket.
SSECustomerAlgorithm *string
@@ -421,38 +534,49 @@ type CopyObjectInput struct {
// encrypting data. This value is used to store the object and then it is
// discarded. Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
- // x-amz-server-side-encryption-customer-algorithm header. This functionality is
- // not supported when the destination bucket is a directory bucket.
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error. This functionality is not
- // supported when the destination bucket is a directory bucket.
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
SSECustomerKeyMD5 *string
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
// JSON with the encryption context key-value pairs. This value must be explicitly
- // added to specify encryption context for CopyObject requests. This functionality
- // is not supported when the destination bucket is a directory bucket.
+ // added to specify encryption context for CopyObject requests.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
SSEKMSEncryptionContext *string
// Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object
// encryption. All GET and PUT requests for an object protected by KMS will fail if
// they're not made via SSL or using SigV4. For information about configuring any
// of the officially supported Amazon Web Services SDKs and Amazon Web Services
- // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
- // in the Amazon S3 User Guide. This functionality is not supported when the
- // destination bucket is a directory bucket.
+ // CLI, see [Specifying the Signature Version in Request Authentication] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ //
+ // [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing this object in Amazon S3
// (for example, AES256 , aws:kms , aws:kms:dsse ). Unrecognized or unsupported
// values won’t write a destination object and will receive a 400 Bad Request
- // response. Amazon S3 automatically encrypts all new objects that are copied to an
- // S3 bucket. When copying an object, if you don't specify encryption information
- // in your copy request, the encryption setting of the target object is set to the
+ // response.
+ //
+ // Amazon S3 automatically encrypts all new objects that are copied to an S3
+ // bucket. When copying an object, if you don't specify encryption information in
+ // your copy request, the encryption setting of the target object is set to the
// default encryption configuration of the destination bucket. By default, all
// buckets have a base level of encryption configuration that uses server-side
// encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a
@@ -460,42 +584,55 @@ type CopyObjectInput struct {
// Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with
// Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with
// customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS
- // key, or a customer-provided key to encrypt the target object copy. When you
- // perform a CopyObject operation, if you want to use a different type of
+ // key, or a customer-provided key to encrypt the target object copy.
+ //
+ // When you perform a CopyObject operation, if you want to use a different type of
// encryption setting for the target object, you can specify appropriate
// encryption-related headers to encrypt the target object with an Amazon S3
// managed key, a KMS key, or a customer-provided key. If the encryption setting in
// your request is different from the default encryption configuration of the
// destination bucket, the encryption setting in your request takes precedence.
- // With server-side encryption, Amazon S3 encrypts your data as it writes your data
- // to disks in its data centers and decrypts the data when you access it. For more
- // information about server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
- // in the Amazon S3 User Guide. For directory buckets, only server-side encryption
- // with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+ //
+ // With server-side encryption, Amazon S3 encrypts your data as it writes your
+ // data to disks in its data centers and decrypts the data when you access it. For
+ // more information about server-side encryption, see [Using Server-Side Encryption] in the Amazon S3 User Guide.
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
+ //
+ // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
ServerSideEncryption types.ServerSideEncryption
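// Illustrative sketch (editorial addition, not part of this patch): requesting
// SSE-KMS with an S3 Bucket Key for the destination copy, as described above.
// The bucket, key, and KMS key ARN are hypothetical placeholders.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyWithSSEKMS encrypts the destination copy with a customer managed KMS key
// and enables an S3 Bucket Key for that object.
func copyWithSSEKMS(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:               aws.String("example-destination-bucket"),
		Key:                  aws.String("reports/january.pdf"),
		CopySource:           aws.String("example-source-bucket/reports/january.pdf"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID"),
		BucketKeyEnabled:     aws.Bool(true),
	})
	return err
}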
// If the x-amz-storage-class header is not used, the copied object will be stored
// in the STANDARD Storage Class by default. The STANDARD storage class provides
// high durability and high availability. Depending on performance needs, you can
// specify a different Storage Class.
+ //
// - Directory buckets - For directory buckets, only the S3 Express One Zone
// storage class is supported to store newly created objects. Unsupported storage
// class values won't write a destination object and will respond with the HTTP
// status code 400 Bad Request .
+ //
// - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class.
+ //
// You can use the CopyObject action to change the storage class of an object that
// is already stored in Amazon S3 by using the x-amz-storage-class header. For
- // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
- // in the Amazon S3 User Guide. Before using an object as a source object for the
- // copy operation, you must restore a copy of it if it meets any of the following
- // conditions:
+ // more information, see [Storage Classes] in the Amazon S3 User Guide.
+ //
+ // Before using an object as a source object for the copy operation, you must
+ // restore a copy of it if it meets any of the following conditions:
+ //
// - The storage class of the source object is GLACIER or DEEP_ARCHIVE .
- // - The storage class of the source object is INTELLIGENT_TIERING and it's S3
- // Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition)
- // is Archive Access or Deep Archive Access .
- // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
- // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html)
- // in the Amazon S3 User Guide.
+ //
+ // - The storage class of the source object is INTELLIGENT_TIERING and its [S3 Intelligent-Tiering access tier] is
+ // Archive Access or Deep Archive Access .
+ //
+ // For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide.
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+ // [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
+ // [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition
StorageClass types.StorageClass
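// Illustrative sketch (editorial addition, not part of this patch): changing the
// storage class of an existing object, per the description above, by copying the
// object onto itself with a new x-amz-storage-class. The bucket and key names
// are hypothetical.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// transitionToStandardIA rewrites an object in place with the STANDARD_IA
// storage class.
func transitionToStandardIA(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:       aws.String("example-bucket"),
		Key:          aws.String("reports/january.pdf"),
		CopySource:   aws.String("example-bucket/reports/january.pdf"),
		StorageClass: types.StorageClassStandardIa,
	})
	return err
}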
// The tag-set for the object copy in the destination bucket. This value must be
@@ -503,60 +640,82 @@ type CopyObjectInput struct {
// the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive
// , you don't need to set the x-amz-tagging header, because the tag-set will be
// copied from the source object directly. The tag-set must be encoded as URL Query
- // parameters. The default value is the empty value. Directory buckets - For
- // directory buckets in a CopyObject operation, only the empty tag-set is
- // supported. Any requests that attempt to write non-empty tags into directory
- // buckets will receive a 501 Not Implemented status code. When the destination
- // bucket is a directory bucket, you will receive a 501 Not Implemented response
- // in any of the following situations:
+ // parameters.
+ //
+ // The default value is the empty value.
+ //
+ // Directory buckets - For directory buckets in a CopyObject operation, only the
+ // empty tag-set is supported. Any requests that attempt to write non-empty tags
+ // into directory buckets will receive a 501 Not Implemented status code. When the
+ // destination bucket is a directory bucket, you will receive a 501 Not Implemented
+ // response in any of the following situations:
+ //
// - When you attempt to COPY the tag-set from an S3 source object that has
// non-empty tags.
+ //
// - When you attempt to REPLACE the tag-set of a source object and set a
// non-empty value to x-amz-tagging .
+ //
// - When you don't set the x-amz-tagging-directive header and the source object
// has non-empty tags. This is because the default value of
// x-amz-tagging-directive is COPY .
+ //
// Because only the empty tag-set is supported for directory buckets in a
// CopyObject operation, the following situations are allowed:
+ //
// - When you attempt to COPY the tag-set from a directory bucket source object
// that has no tags to a general purpose bucket. It copies an empty tag-set to the
// destination object.
+ //
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and set the x-amz-tagging value of the directory bucket destination object to
// empty.
+ //
// - When you attempt to REPLACE the tag-set of a general purpose bucket source
// object that has non-empty tags and set the x-amz-tagging value of the
// directory bucket destination object to empty.
+ //
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and don't set the x-amz-tagging value of the directory bucket destination
// object. This is because the default value of x-amz-tagging is the empty value.
Tagging *string
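// Illustrative sketch (editorial addition, not part of this patch): supplying a
// new tag-set on copy. It uses the TaggingDirective field documented just below;
// the tag keys, values, and bucket names are hypothetical.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyReplacingTags replaces the source object's tag-set with a new, URL
// query-encoded tag-set on the destination copy.
func copyReplacingTags(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:           aws.String("example-destination-bucket"),
		Key:              aws.String("reports/january.pdf"),
		CopySource:       aws.String("example-source-bucket/reports/january.pdf"),
		TaggingDirective: types.TaggingDirectiveReplace,
		Tagging:          aws.String("department=finance&year=2024"),
	})
	return err
}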
// Specifies whether the object tag-set is copied from the source object or
- // replaced with the tag-set that's provided in the request. The default value is
- // COPY . Directory buckets - For directory buckets in a CopyObject operation,
- // only the empty tag-set is supported. Any requests that attempt to write
- // non-empty tags into directory buckets will receive a 501 Not Implemented status
- // code. When the destination bucket is a directory bucket, you will receive a 501
- // Not Implemented response in any of the following situations:
+ // replaced with the tag-set that's provided in the request.
+ //
+ // The default value is COPY .
+ //
+ // Directory buckets - For directory buckets in a CopyObject operation, only the
+ // empty tag-set is supported. Any requests that attempt to write non-empty tags
+ // into directory buckets will receive a 501 Not Implemented status code. When the
+ // destination bucket is a directory bucket, you will receive a 501 Not Implemented
+ // response in any of the following situations:
+ //
// - When you attempt to COPY the tag-set from an S3 source object that has
// non-empty tags.
+ //
// - When you attempt to REPLACE the tag-set of a source object and set a
// non-empty value to x-amz-tagging .
+ //
// - When you don't set the x-amz-tagging-directive header and the source object
// has non-empty tags. This is because the default value of
// x-amz-tagging-directive is COPY .
+ //
// Because only the empty tag-set is supported for directory buckets in a
// CopyObject operation, the following situations are allowed:
+ //
// - When you attempt to COPY the tag-set from a directory bucket source object
// that has no tags to a general purpose bucket. It copies an empty tag-set to the
// destination object.
+ //
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and set the x-amz-tagging value of the directory bucket destination object to
// empty.
+ //
// - When you attempt to REPLACE the tag-set of a general purpose bucket source
// object that has non-empty tags and set the x-amz-tagging value of the
// directory bucket destination object to empty.
+ //
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and don't set the x-amz-tagging value of the directory bucket destination
// object. This is because the default value of x-amz-tagging is the empty value.
@@ -567,8 +726,9 @@ type CopyObjectInput struct {
// Amazon S3 stores the value of this header in the object metadata. This value is
// unique to each object and is not copied when using the x-amz-metadata-directive
// header. Instead, you may opt to provide this header in combination with the
- // x-amz-metadata-directive header. This functionality is not supported for
- // directory buckets.
+ // x-amz-metadata-directive header.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string
noSmithyDocumentSerde
@@ -582,56 +742,68 @@ func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) {
type CopyObjectOutput struct {
// Indicates whether the copied object uses an S3 Bucket Key for server-side
- // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
- // is not supported for directory buckets.
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// Container for all response elements.
CopyObjectResult *types.CopyObjectResult
- // Version ID of the source object that was copied. This functionality is not
- // supported when the source object is in a directory bucket.
+ // Version ID of the source object that was copied.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceVersionId *string
- // If the object expiration is configured, the response includes this header. This
- // functionality is not supported for directory buckets.
+ // If the object expiration is configured, the response includes this header.
+ //
+ // This functionality is not supported for directory buckets.
Expiration *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to confirm the encryption
- // algorithm that's used. This functionality is not supported for directory
- // buckets.
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide the round-trip
- // message integrity verification of the customer-provided encryption key. This
- // functionality is not supported for directory buckets.
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, indicates the Amazon Web Services KMS Encryption Context to use for
// object encryption. The value of this header is a base64-encoded UTF-8 string
- // holding JSON with the encryption context key-value pairs. This functionality is
- // not supported for directory buckets.
+ // holding JSON with the encryption context key-value pairs.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSEncryptionContext *string
// If present, indicates the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. This functionality
- // is not supported for directory buckets.
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when you store this object in Amazon
- // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only
- // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is
- // supported.
+ // S3 (for example, AES256 , aws:kms , aws:kms:dsse ).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
- // Version ID of the newly created copy. This functionality is not supported for
- // directory buckets.
+ // Version ID of the newly created copy.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
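// Illustrative sketch (editorial addition, not part of this patch): reading the
// response fields documented above after a copy completes. Bucket and key names
// are hypothetical.
import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// copyAndInspect logs the version ID of the new copy (set only on versioned
// destination buckets) and the ETag carried in CopyObjectResult.
func copyAndInspect(ctx context.Context, client *s3.Client) error {
	out, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("example-destination-bucket"),
		Key:        aws.String("reports/january.pdf"),
		CopySource: aws.String("example-source-bucket/reports/january.pdf"),
	})
	if err != nil {
		return err
	}
	log.Printf("version: %s etag: %s",
		aws.ToString(out.VersionId),
		aws.ToString(out.CopyObjectResult.ETag))
	return nil
}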
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go
index b39244bcfe..34174b2320 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go
@@ -15,89 +15,116 @@ import (
)
// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts
-// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html)
-// . Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and
-// have a valid Amazon Web Services Access Key ID to authenticate requests.
-// Anonymous requests are never allowed to create buckets. By creating the bucket,
-// you become the bucket owner. There are two types of buckets: general purpose
-// buckets and directory buckets. For more information about these bucket types,
-// see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html)
-// in the Amazon S3 User Guide.
+// bucket, see [CreateBucket].
+//
+// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have
+// a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous
+// requests are never allowed to create buckets. By creating the bucket, you become
+// the bucket owner.
+//
+// There are two types of buckets: general purpose buckets and directory buckets.
+// For more information about these bucket types, see [Creating, configuring, and working with Amazon S3 buckets] in the Amazon S3 User Guide.
+//
// - General purpose buckets - If you send your CreateBucket request to the
// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So
// the signature calculations in Signature Version 4 must use us-east-1 as the
// Region, even if the location constraint in the request specifies another Region
// where the bucket is to be created. If you create a bucket in a Region other than
// US East (N. Virginia), your application must be able to handle 307 redirect. For
-// more information, see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html)
-// in the Amazon S3 User Guide.
+// more information, see [Virtual hosting of buckets] in the Amazon S3 User Guide.
+//
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Regional endpoint. These endpoints support path-style
// requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
-// Virtual-hosted-style requests aren't supported. For more information, see
-// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide.
+// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints] in
+// the Amazon S3 User Guide.
//
// Permissions
+//
// - General purpose bucket permissions - In addition to the s3:CreateBucket
// permission, the following permissions are required in a policy when your
// CreateBucket request includes specific headers:
+//
// - Access control lists (ACLs) - In your CreateBucket request, if you specify
// an access control list (ACL) and set it to public-read , public-read-write ,
// authenticated-read , or if you explicitly specify any other custom ACLs, both
// s3:CreateBucket and s3:PutBucketAcl permissions are required. In your
// CreateBucket request, if you set the ACL to private , or if you don't specify
// any ACLs, only the s3:CreateBucket permission is required.
+//
// - Object Lock - In your CreateBucket request, if you set
// x-amz-bucket-object-lock-enabled to true, the
// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are
// required.
+//
// - S3 Object Ownership - If your CreateBucket request includes the
// x-amz-object-ownership header, then the s3:PutBucketOwnershipControls
-// permission is required. To set an ACL on a bucket as part of a CreateBucket
-// request, you must explicitly set S3 Object Ownership for the bucket to a
-// different value than the default, BucketOwnerEnforced . Additionally, if your
-// desired bucket ACL grants public access, you must first create the bucket
-// (without the bucket ACL) and then explicitly disable Block Public Access on the
-// bucket before using PutBucketAcl to set the ACL. If you try to create a bucket
-// with a public ACL, the request will fail. For the majority of modern use cases
-// in S3, we recommend that you keep all Block Public Access settings enabled and
-// keep ACLs disabled. If you would like to share data with users outside of your
-// account, you can use bucket policies as needed. For more information, see
-// Controlling ownership of objects and disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html)
-// in the Amazon S3 User Guide.
-// - S3 Block Public Access - If your specific use case requires granting public
-// access to your S3 resources, you can disable Block Public Access. Specifically,
-// you can create a new bucket with Block Public Access enabled, then separately
-// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
-// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
-// permission. For more information about S3 Block Public Access, see Blocking
-// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html)
-// in the Amazon S3 User Guide.
-// - Directory bucket permissions - You must have the s3express:CreateBucket
-// permission in an IAM identity-based policy instead of a bucket policy.
-// Cross-account access to this API operation isn't supported. This operation can
-// only be performed by the Amazon Web Services account that owns the resource. For
-// more information about directory bucket policies and permissions, see Amazon
-// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
-// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 Object
-// Ownership, and S3 Block Public Access are not supported for directory buckets.
-// For directory buckets, all Block Public Access settings are enabled at the
-// bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs
-// disabled). These settings can't be modified. For more information about
-// permissions for creating and working with directory buckets, see Directory
-// buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
-// in the Amazon S3 User Guide. For more information about supported S3 features
-// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features)
-// in the Amazon S3 User Guide.
+// permission is required.
+//
+// To set an ACL on a bucket as part of a CreateBucket request, you must explicitly
+// set S3 Object Ownership for the bucket to a different value than the default,
+// BucketOwnerEnforced . Additionally, if your desired bucket ACL grants public
+// access, you must first create the bucket (without the bucket ACL) and then
+// explicitly disable Block Public Access on the bucket before using PutBucketAcl
+// to set the ACL. If you try to create a bucket with a public ACL, the request
+// will fail.
+//
+// For the majority of modern use cases in S3, we recommend that you keep all
+// Block Public Access settings enabled and keep ACLs disabled. If you would like
+// to share data with users outside of your account, you can use bucket policies as
+// needed. For more information, see [Controlling ownership of objects and disabling ACLs for your bucket] and [Blocking public access to your Amazon S3 storage] in the Amazon S3 User Guide.
+//
+// - S3 Block Public Access - If your specific use case requires granting public
+// access to your S3 resources, you can disable Block Public Access. Specifically,
+// you can create a new bucket with Block Public Access enabled, then separately
+// call the [DeletePublicAccessBlock] API. To use this operation, you must have the
+// s3:PutBucketPublicAccessBlock permission. For more information about S3 Block
+// Public Access, see [Blocking public access to your Amazon S3 storage] in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - You must have the s3express:CreateBucket
+// permission in an IAM identity-based policy instead of a bucket policy.
+// Cross-account access to this API operation isn't supported. This operation can
+// only be performed by the Amazon Web Services account that owns the resource. For
+// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the
+// Amazon S3 User Guide.
+//
+// The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public
+// Access are not supported for directory buckets. For directory buckets, all Block
+// Public Access settings are enabled at the bucket level and S3 Object Ownership
+// is set to Bucket owner enforced (ACLs disabled). These settings can't be
+// modified.
+//
+// For more information about permissions for creating and working with directory
+// buckets, see [Directory buckets] in the Amazon S3 User Guide. For more information about
+// supported S3 features for directory buckets, see [Features of S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
+//
+// The following operations are related to CreateBucket :
+//
+// [PutObject]
+//
+// [DeleteBucket]
+//
+// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html
+// [Virtual hosting of buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// s3express-control.region.amazonaws.com . The following operations are related to
-// CreateBucket :
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
-// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
+// [Features of S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features
+// [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+// [Blocking public access to your Amazon S3 storage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) {
if params == nil {
params = &CreateBucketInput{}
@@ -115,71 +142,93 @@ func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, op
type CreateBucketInput struct {
- // The name of the bucket to create. General purpose buckets - For information
- // about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html)
- // in the Amazon S3 User Guide. Directory buckets - When you use this operation
- // with a directory bucket, you must use path-style requests in the format
+ // The name of the bucket to create.
+ //
+ // General purpose buckets - For information about bucket naming restrictions, see [Bucket naming rules]
+ // in the Amazon S3 User Guide.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide
+ // ). For information about bucket naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User
+ // Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [Bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
//
// This member is required.
Bucket *string
- // The canned ACL to apply to the bucket. This functionality is not supported for
- // directory buckets.
+ // The canned ACL to apply to the bucket.
+ //
+ // This functionality is not supported for directory buckets.
ACL types.BucketCannedACL
// The configuration information for the bucket.
CreateBucketConfiguration *types.CreateBucketConfiguration
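// Illustrative sketch (editorial addition, not part of this patch): creating a
// general purpose bucket outside us-east-1, which requires the location
// constraint carried in CreateBucketConfiguration. The bucket name and Region
// are hypothetical.
import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createRegionalBucket creates a bucket in us-west-2.
func createRegionalBucket(ctx context.Context, client *s3.Client) error {
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String("example-new-bucket"),
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraintUsWest2,
		},
	})
	return err
}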
// Allows grantee the read, write, read ACP, and write ACP permissions on the
- // bucket. This functionality is not supported for directory buckets.
+ // bucket.
+ //
+ // This functionality is not supported for directory buckets.
GrantFullControl *string
- // Allows grantee to list the objects in the bucket. This functionality is not
- // supported for directory buckets.
+ // Allows grantee to list the objects in the bucket.
+ //
+ // This functionality is not supported for directory buckets.
GrantRead *string
- // Allows grantee to read the bucket ACL. This functionality is not supported for
- // directory buckets.
+ // Allows grantee to read the bucket ACL.
+ //
+ // This functionality is not supported for directory buckets.
GrantReadACP *string
- // Allows grantee to create new objects in the bucket. For the bucket and object
- // owners of existing objects, also allows deletions and overwrites of those
- // objects. This functionality is not supported for directory buckets.
+ // Allows grantee to create new objects in the bucket.
+ //
+ // For the bucket and object owners of existing objects, also allows deletions and
+ // overwrites of those objects.
+ //
+ // This functionality is not supported for directory buckets.
GrantWrite *string
- // Allows grantee to write the ACL for the applicable bucket. This functionality
- // is not supported for directory buckets.
+ // Allows grantee to write the ACL for the applicable bucket.
+ //
+ // This functionality is not supported for directory buckets.
GrantWriteACP *string
// Specifies whether you want S3 Object Lock to be enabled for the new bucket.
+ //
// This functionality is not supported for directory buckets.
ObjectLockEnabledForBucket *bool
// The container element for object ownership for a bucket's ownership controls.
+ //
// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the
// bucket owner if the objects are uploaded with the bucket-owner-full-control
- // canned ACL. ObjectWriter - The uploading account will own the object if the
- // object is uploaded with the bucket-owner-full-control canned ACL.
+ // canned ACL.
+ //
+ // ObjectWriter - The uploading account will own the object if the object is
+ // uploaded with the bucket-owner-full-control canned ACL.
+ //
// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer
// affect permissions. The bucket owner automatically owns and has full control
// over every object in the bucket. The bucket only accepts PUT requests that don't
// specify an ACL or specify bucket owner full control ACLs (such as the predefined
// bucket-owner-full-control canned ACL or a custom ACL in XML format that grants
- // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced
- // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon
- // use cases where you must control access for each object individually. For more
- // information about S3 Object Ownership, see Controlling ownership of objects and
- // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets. Directory buckets use the bucket owner enforced setting for S3 Object
- // Ownership.
+ // the same permissions).
+ //
+ // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are
+ // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where
+ // you must control access for each object individually. For more information about
+ // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Directory buckets
+ // use the bucket owner enforced setting for S3 Object Ownership.
+ //
+ // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
ObjectOwnership types.ObjectOwnership
noSmithyDocumentSerde
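For orientation, a minimal sketch of calling the CreateBucket operation documented above with this vendored client; the bucket name, Region, and ownership value are illustrative placeholders, not values taken from this change.

```go
// Minimal sketch, not part of the vendored change: bucket name and Region are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	// Resolve credentials and Region from the default chain (env vars, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-west-2"))
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Create a general purpose bucket with ACLs disabled (BucketOwnerEnforced),
	// which the documentation above notes is also the default ObjectOwnership.
	_, err = client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String("doc-example-bucket"), // placeholder
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraintUsWest2,
		},
		ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("bucket created")
}
```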
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go
index c083c32d8e..9693c4cbd8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go
@@ -16,62 +16,64 @@ import (
// This action initiates a multipart upload and returns an upload ID. This upload
// ID is used to associate all of the parts in the specific multipart upload. You
-// specify this upload ID in each of your subsequent upload part requests (see
-// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// ). You also include this upload ID in the final request to either complete or
-// abort the multipart upload request. For more information about multipart
-// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html)
-// in the Amazon S3 User Guide. After you initiate a multipart upload and upload
-// one or more parts, to stop being charged for storing the uploaded parts, you
-// must either complete or abort the multipart upload. Amazon S3 frees up the space
-// used to store the parts and stops charging you for storing them only after you
-// either complete or abort a multipart upload. If you have configured a lifecycle
-// rule to abort incomplete multipart uploads, the created multipart upload must be
-// completed within the number of days specified in the bucket lifecycle
-// configuration. Otherwise, the incomplete multipart upload becomes eligible for
-// an abort action and Amazon S3 aborts the multipart upload. For more information,
-// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
-// .
+// specify this upload ID in each of your subsequent upload part requests (see [UploadPart]).
+// You also include this upload ID in the final request to either complete or abort
+// the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview]
+// in the Amazon S3 User Guide.
+//
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or abort
+// the multipart upload. Amazon S3 frees up the space used to store the parts and
+// stops charging you for storing them only after you either complete or abort a
+// multipart upload.
+//
+// If you have configured a lifecycle rule to abort incomplete multipart uploads,
+// the created multipart upload must be completed within the number of days
+// specified in the bucket lifecycle configuration. Otherwise, the incomplete
+// multipart upload becomes eligible for an abort action and Amazon S3 aborts the
+// multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration].
+//
// - Directory buckets - S3 Lifecycle is not supported by directory buckets.
+//
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Zonal endpoint. These endpoints support
// virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name .
-// Path-style requests are not supported. For more information, see Regional and
-// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide.
+// Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the
+// Amazon S3 User Guide.
//
// Request signing For request signing, multipart upload is just a series of
// regular requests. You initiate a multipart upload, send one or more requests to
// upload parts, and then complete the multipart upload process. You sign each
// request individually. There is nothing special about signing multipart upload
-// requests. For more information about signing, see Authenticating Requests
-// (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html)
-// in the Amazon S3 User Guide. Permissions
+// requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)]in the Amazon S3 User Guide.
+//
+// Permissions
// - General purpose bucket permissions - For information about the permissions
-// required to use the multipart upload API, see Multipart upload and permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide. To perform a multipart upload with encryption by
-// using an Amazon Web Services KMS key, the requester must have permission to the
-// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are
-// required because Amazon S3 must decrypt and read data from the encrypted file
-// parts before it completes the multipart upload. For more information, see
-// Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
-// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
-// in the Amazon S3 User Guide.
-// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
-// s3express:CreateSession permission to the directory bucket in a bucket policy
-// or an IAM identity-based policy. Then, you make the CreateSession API call on
-// the bucket to obtain a session token. With the session token in your request
-// header, you can make API requests to this operation. After the session token
-// expires, you make another CreateSession API call to generate a new session
-// token for use. Amazon Web Services CLI or SDKs create session and refresh the
-// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// required to use the multipart upload API, see [Multipart upload and permissions]in the Amazon S3 User Guide.
+//
+// To perform a multipart upload with encryption by using an Amazon Web Services
+//
+// KMS key, the requester must have permission to the kms:Decrypt and
+// kms:GenerateDataKey* actions on the key. These permissions are required
+// because Amazon S3 must decrypt and read data from the encrypted file parts
+// before it completes the multipart upload. For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in
+// the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+//   directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. Amazon Web Services CLI or SDKs create session and refresh the
+// session token automatically to avoid service interruptions when a session
+//   expires. For more information about authorization, see [CreateSession].
//
// Encryption
+//
// - General purpose buckets - Server-side encryption is for data encryption at
// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers
// and decrypts it when you access it. Amazon S3 automatically encrypts all new
@@ -91,61 +93,96 @@ import (
// in your request is different from the default encryption configuration of the
// destination bucket, the encryption setting in your request takes precedence. If
// you choose to provide your own encryption key, the request headers you provide
-// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
-// requests must match the headers you used in the CreateMultipartUpload request.
+// in [UploadPart]and [UploadPartCopy]requests must match the headers you used in the CreateMultipartUpload
+// request.
+//
// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (
// aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS)
// – If you want Amazon Web Services to manage the keys used to encrypt data,
// specify the following headers in the request.
+//
// - x-amz-server-side-encryption
+//
// - x-amz-server-side-encryption-aws-kms-key-id
+//
// - x-amz-server-side-encryption-context
+//
// - If you specify x-amz-server-side-encryption:aws:kms , but don't provide
// x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web
// Services managed key ( aws/s3 key) in KMS to protect the data.
+//
// - To perform a multipart upload with encryption by using an Amazon Web
// Services KMS key, the requester must have permission to the kms:Decrypt and
// kms:GenerateDataKey* actions on the key. These permissions are required
// because Amazon S3 must decrypt and read data from the encrypted file parts
-// before it completes the multipart upload. For more information, see Multipart
-// upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
-// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
-// in the Amazon S3 User Guide.
+// before it completes the multipart upload. For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in
+// the Amazon S3 User Guide.
+//
// - If your Identity and Access Management (IAM) user or role is in the same
// Amazon Web Services account as the KMS key, then you must have these permissions
// on the key policy. If your IAM user or role is in a different account from the
// key, then you must have the permissions on both the key policy and your IAM user
// or role.
+//
// - All GET and PUT requests for an object protected by KMS fail if you don't
// make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS),
// or Signature Version 4. For information about configuring any of the officially
-// supported Amazon Web Services SDKs and Amazon Web Services CLI, see
-// Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
-// in the Amazon S3 User Guide. For more information about server-side
-// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side
-// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
-// in the Amazon S3 User Guide.
-// - Use customer-provided encryption keys (SSE-C) – If you want to manage your
-// own encryption keys, provide all the following headers in the request.
-// - x-amz-server-side-encryption-customer-algorithm
-// - x-amz-server-side-encryption-customer-key
-// - x-amz-server-side-encryption-customer-key-MD5 For more information about
-// server-side encryption with customer-provided encryption keys (SSE-C), see
-// Protecting data using server-side encryption with customer-provided encryption
-// keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)
-// in the Amazon S3 User Guide.
-// - Directory buckets -For directory buckets, only server-side encryption with
-// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
-//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to CreateMultipartUpload :
-// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+// supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication]in the
+// Amazon S3 User Guide.
+//
+// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys]
+//
+// in the Amazon S3 User Guide.
+//
+// - Use customer-provided encryption keys (SSE-C) – If you want to manage your
+// own encryption keys, provide all the following headers in the request.
+//
+// - x-amz-server-side-encryption-customer-algorithm
+//
+// - x-amz-server-side-encryption-customer-key
+//
+// - x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about server-side encryption with customer-provided
+//
+// encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]in the Amazon S3 User Guide.
+//
+//   - Directory buckets - For directory buckets, only server-side encryption with
+// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following operations are related to CreateMultipartUpload :
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [AbortMultipartUpload]
+//
+// [ListParts]
+//
+// [ListMultipartUploads]
+//
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [Protecting Data Using Server-Side Encryption with KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
+// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions
+// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
+// [Protecting data using server-side encryption with Amazon Web Services KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+//
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) {
if params == nil {
params = &CreateMultipartUploadInput{}
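The doc comment above walks through the initiate → upload → complete lifecycle, with abort as the cost-control escape hatch. A hedged sketch of that flow against the vendored client follows; the bucket, key, and single in-memory part are placeholders, and field shapes (for example the *int32 part number) reflect the SDK version vendored here.

```go
// Sketch of the initiate → upload → complete flow; values are placeholders.
package example

import (
	"context"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func uploadInParts(ctx context.Context, client *s3.Client) error {
	bucket, key := "doc-example-bucket", "example-object" // placeholders

	// Initiate the upload and keep the returned UploadId for every later call.
	create, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}

	// Upload a single part; real uploads would loop over larger chunks.
	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		UploadId:   create.UploadId,
		PartNumber: aws.Int32(1),
		Body:       strings.NewReader("hello, multipart"),
	})
	if err != nil {
		// Abort so the stored part stops accruing charges, per the lifecycle note above.
		_, _ = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
			Bucket:   aws.String(bucket),
			Key:      aws.String(key),
			UploadId: create.UploadId,
		})
		return err
	}

	// Complete the upload by listing every part's number and ETag.
	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: create.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int32(1)}},
		},
	})
	return err
}
```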
@@ -164,30 +201,39 @@ func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultip
type CreateMultipartUploadInput struct {
// The name of the bucket where the multipart upload is initiated and where the
- // object is uploaded. Directory buckets - When you use this operation with a
- // directory bucket, you must use virtual-hosted-style requests in the format
+ // object is uploaded.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -199,32 +245,41 @@ type CreateMultipartUploadInput struct {
// The canned ACL to apply to the object. Amazon S3 supports a set of predefined
// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and
- // permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
- // in the Amazon S3 User Guide. By default, all objects are private. Only the owner
- // has full access control. When uploading an object, you can grant access
- // permissions to individual Amazon Web Services accounts or to predefined groups
- // defined by Amazon S3. These permissions are then added to the access control
- // list (ACL) on the new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
- // . One way to grant the permissions using the request headers is to specify a
- // canned ACL with the x-amz-acl request header.
+ // permissions. For more information, see [Canned ACL]in the Amazon S3 User Guide.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can grant access permissions to individual Amazon
+ // Web Services accounts or to predefined groups defined by Amazon S3. These
+ // permissions are then added to the access control list (ACL) on the new object.
+ // For more information, see [Using ACLs]. One way to grant the permissions using the request
+ // headers is to specify a canned ACL with the x-amz-acl request header.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
ACL types.ObjectCannedACL
// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
// with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
// Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object
- // encryption with SSE-KMS. Specifying this header with an object action doesn’t
- // affect bucket-level settings for S3 Bucket Key. This functionality is not
- // supported for directory buckets.
+ // encryption with SSE-KMS.
+ //
+ // Specifying this header with an object action doesn’t affect bucket-level
+ // settings for S3 Bucket Key.
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// Specifies caching behavior along the request/reply chain.
CacheControl *string
// Indicates the algorithm that you want Amazon S3 to use to create the checksum
- // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// Specifies presentational information for the object.
@@ -232,8 +287,10 @@ type CreateMultipartUploadInput struct {
// Specifies what content encodings have been applied to the object and thus what
// decoding mechanisms must be applied to obtain the media-type referenced by the
- // Content-Type header field. For directory buckets, only the aws-chunked value is
- // supported in this header field.
+ // Content-Type header field.
+ //
+ // For directory buckets, only the aws-chunked value is supported in this header
+ // field.
ContentEncoding *string
// The language that the content is in.
@@ -251,207 +308,322 @@ type CreateMultipartUploadInput struct {
Expires *time.Time
// Specify access permissions explicitly to give the grantee READ, READ_ACP, and
- // WRITE_ACP permissions on the object. By default, all objects are private. Only
- // the owner has full access control. When uploading an object, you can use this
- // header to explicitly grant access permissions to specific Amazon Web Services
- // accounts or groups. This header maps to specific permissions that Amazon S3
- // supports in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
- // in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
- // where the type is one of the following:
+ // WRITE_ACP permissions on the object.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header maps
+ // to specific permissions that Amazon S3 supports in an ACL. For more information,
+ // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
// - id – if the value specified is the canonical user ID of an Amazon Web
// Services account
+ //
// - uri – if you are granting permissions to a predefined group
+ //
// - emailAddress – if the value specified is the email address of an Amazon Web
- // Services account Using email addresses to specify a grantee is only supported in
- // the following Amazon Web Services Regions:
+ // Services account
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
// - US East (N. Virginia)
+ //
// - US West (N. California)
+ //
// - US West (Oregon)
+ //
// - Asia Pacific (Singapore)
+ //
// - Asia Pacific (Sydney)
+ //
// - Asia Pacific (Tokyo)
+ //
// - Europe (Ireland)
- // - South America (São Paulo) For a list of all the Amazon S3 supported Regions
- // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
- // in the Amazon Web Services General Reference.
+ //
+ // - South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ // Amazon Web Services General Reference.
+ //
// For example, the following x-amz-grant-read header grants the Amazon Web
// Services accounts identified by account IDs permissions to read object data and
- // its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+ // its metadata:
+ //
+ // x-amz-grant-read: id="11112222333", id="444455556666"
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
GrantFullControl *string
// Specify access permissions explicitly to allow grantee to read the object data
- // and its metadata. By default, all objects are private. Only the owner has full
- // access control. When uploading an object, you can use this header to explicitly
- // grant access permissions to specific Amazon Web Services accounts or groups.
- // This header maps to specific permissions that Amazon S3 supports in an ACL. For
- // more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
- // in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
- // where the type is one of the following:
+ // and its metadata.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header maps
+ // to specific permissions that Amazon S3 supports in an ACL. For more information,
+ // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
// - id – if the value specified is the canonical user ID of an Amazon Web
// Services account
+ //
// - uri – if you are granting permissions to a predefined group
+ //
// - emailAddress – if the value specified is the email address of an Amazon Web
- // Services account Using email addresses to specify a grantee is only supported in
- // the following Amazon Web Services Regions:
+ // Services account
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
// - US East (N. Virginia)
+ //
// - US West (N. California)
+ //
// - US West (Oregon)
+ //
// - Asia Pacific (Singapore)
+ //
// - Asia Pacific (Sydney)
+ //
// - Asia Pacific (Tokyo)
+ //
// - Europe (Ireland)
- // - South America (São Paulo) For a list of all the Amazon S3 supported Regions
- // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
- // in the Amazon Web Services General Reference.
+ //
+ // - South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ // Amazon Web Services General Reference.
+ //
// For example, the following x-amz-grant-read header grants the Amazon Web
// Services accounts identified by account IDs permissions to read object data and
- // its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+ // its metadata:
+ //
+ // x-amz-grant-read: id="11112222333", id="444455556666"
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
GrantRead *string
// Specify access permissions explicitly to allow grantee to read the object ACL.
+ //
// By default, all objects are private. Only the owner has full access control.
// When uploading an object, you can use this header to explicitly grant access
// permissions to specific Amazon Web Services accounts or groups. This header maps
// to specific permissions that Amazon S3 supports in an ACL. For more information,
- // see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
- // in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
- // where the type is one of the following:
+ // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
// - id – if the value specified is the canonical user ID of an Amazon Web
// Services account
+ //
// - uri – if you are granting permissions to a predefined group
+ //
// - emailAddress – if the value specified is the email address of an Amazon Web
- // Services account Using email addresses to specify a grantee is only supported in
- // the following Amazon Web Services Regions:
+ // Services account
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
// - US East (N. Virginia)
+ //
// - US West (N. California)
+ //
// - US West (Oregon)
+ //
// - Asia Pacific (Singapore)
+ //
// - Asia Pacific (Sydney)
+ //
// - Asia Pacific (Tokyo)
+ //
// - Europe (Ireland)
- // - South America (São Paulo) For a list of all the Amazon S3 supported Regions
- // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
- // in the Amazon Web Services General Reference.
+ //
+ // - South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ // Amazon Web Services General Reference.
+ //
// For example, the following x-amz-grant-read header grants the Amazon Web
// Services accounts identified by account IDs permissions to read object data and
- // its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+ // its metadata:
+ //
+ // x-amz-grant-read: id="11112222333", id="444455556666"
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
GrantReadACP *string
// Specify access permissions explicitly to allow grantee to
- // write the ACL for the applicable object. By default, all objects are private.
- // Only the owner has full access control. When uploading an object, you can use
- // this header to explicitly grant access permissions to specific Amazon Web
- // Services accounts or groups. This header maps to specific permissions that
- // Amazon S3 supports in an ACL. For more information, see Access Control List
- // (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
- // in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
- // where the type is one of the following:
+ // write the ACL for the applicable object.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header maps
+ // to specific permissions that Amazon S3 supports in an ACL. For more information,
+ // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
// - id – if the value specified is the canonical user ID of an Amazon Web
// Services account
+ //
// - uri – if you are granting permissions to a predefined group
+ //
// - emailAddress – if the value specified is the email address of an Amazon Web
- // Services account Using email addresses to specify a grantee is only supported in
- // the following Amazon Web Services Regions:
+ // Services account
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
// - US East (N. Virginia)
+ //
// - US West (N. California)
+ //
// - US West (Oregon)
+ //
// - Asia Pacific (Singapore)
+ //
// - Asia Pacific (Sydney)
+ //
// - Asia Pacific (Tokyo)
+ //
// - Europe (Ireland)
- // - South America (São Paulo) For a list of all the Amazon S3 supported Regions
- // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
- // in the Amazon Web Services General Reference.
+ //
+ // - South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ // Amazon Web Services General Reference.
+ //
// For example, the following x-amz-grant-read header grants the Amazon Web
// Services accounts identified by account IDs permissions to read object data and
- // its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+ // its metadata:
+ //
+ // x-amz-grant-read: id="11112222333", id="444455556666"
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
GrantWriteACP *string
// A map of metadata to store with the object in S3.
Metadata map[string]string
- // Specifies whether you want to apply a legal hold to the uploaded object. This
- // functionality is not supported for directory buckets.
+ // Specifies whether you want to apply a legal hold to the uploaded object.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// Specifies the Object Lock mode that you want to apply to the uploaded object.
+ //
// This functionality is not supported for directory buckets.
ObjectLockMode types.ObjectLockMode
- // Specifies the date and time when you want the Object Lock to expire. This
- // functionality is not supported for directory buckets.
+ // Specifies the date and time when you want the Object Lock to expire.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Specifies the algorithm to use when encrypting the object (for example,
- // AES256). This functionality is not supported for directory buckets.
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
// encrypting data. This value is used to store the object and then it is
// discarded; Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
- // x-amz-server-side-encryption-customer-algorithm header. This functionality is
- // not supported for directory buckets.
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the customer-provided encryption key
// according to RFC 1321. Amazon S3 uses this header for a message integrity check
- // to ensure that the encryption key was transmitted without error. This
- // functionality is not supported for directory buckets.
+ // to ensure that the encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
- // JSON with the encryption context key-value pairs. This functionality is not
- // supported for directory buckets.
+ // JSON with the encryption context key-value pairs.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSEncryptionContext *string
// Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption
- // customer managed key to use for object encryption. This functionality is not
- // supported for directory buckets.
+ // customer managed key to use for object encryption.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when you store this object in Amazon
- // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side
- // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+ // S3 (for example, AES256 , aws:kms ).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high
// availability. Depending on performance needs, you can specify a different
- // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
- // in the Amazon S3 User Guide.
+ // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide.
+ //
// - For directory buckets, only the S3 Express One Zone storage class is
// supported to store newly created objects.
+ //
// - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
StorageClass types.StorageClass
- // The tag-set for the object. The tag-set must be encoded as URL Query
- // parameters. This functionality is not supported for directory buckets.
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ //
+ // This functionality is not supported for directory buckets.
Tagging *string
// If the bucket is configured as a website, redirects requests for this object to
// another object in the same bucket or to an external URL. Amazon S3 stores the
- // value of this header in the object metadata. This functionality is not supported
- // for directory buckets.
+ // value of this header in the object metadata.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string
noSmithyDocumentSerde
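The field documentation above covers the SSE-KMS, bucket key, checksum, and storage class knobs on CreateMultipartUploadInput. A hedged sketch of populating them together; the bucket, key, and KMS key ARN are placeholders, and the fragment assumes the same imports as the earlier sketch.

```go
// Illustrative field population only; bucket, key, and the KMS key ARN are placeholders.
input := &s3.CreateMultipartUploadInput{
	Bucket:               aws.String("doc-example-bucket"),
	Key:                  aws.String("reports/summary.csv"),
	ServerSideEncryption: types.ServerSideEncryptionAwsKms,
	SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID"),
	BucketKeyEnabled:     aws.Bool(true), // use an S3 Bucket Key to reduce KMS request costs
	ChecksumAlgorithm:    types.ChecksumAlgorithmSha256,
	StorageClass:         types.StorageClassIntelligentTiering,
}
```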
@@ -469,27 +641,33 @@ type CreateMultipartUploadOutput struct {
// incomplete multipart uploads and the prefix in the lifecycle rule matches the
// object name in the request, the response includes this header. The header
// indicates when the initiated multipart upload becomes eligible for an abort
- // operation. For more information, see Aborting Incomplete Multipart Uploads
- // Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
- // in the Amazon S3 User Guide. The response also includes the x-amz-abort-rule-id
- // header that provides the ID of the lifecycle configuration rule that defines the
- // abort action. This functionality is not supported for directory buckets.
+ // operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide.
+ //
+ // The response also includes the x-amz-abort-rule-id header that provides the ID
+ // of the lifecycle configuration rule that defines the abort action.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
AbortDate *time.Time
// This header is returned along with the x-amz-abort-date header. It identifies
// the applicable lifecycle configuration rule that defines the action to abort
- // incomplete multipart uploads. This functionality is not supported for directory
- // buckets.
+ // incomplete multipart uploads.
+ //
+ // This functionality is not supported for directory buckets.
AbortRuleId *string
// The name of the bucket to which the multipart upload was initiated. Does not
- // return the access point ARN or access point alias if used. Access points are not
- // supported by directory buckets.
+ // return the access point ARN or access point alias if used.
+ //
+ // Access points are not supported by directory buckets.
Bucket *string
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
- // is not supported for directory buckets.
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// The algorithm that was used to create a checksum of the object.
@@ -499,35 +677,43 @@ type CreateMultipartUploadOutput struct {
Key *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to confirm the encryption
- // algorithm that's used. This functionality is not supported for directory
- // buckets.
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide the round-trip
- // message integrity verification of the customer-provided encryption key. This
- // functionality is not supported for directory buckets.
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, indicates the Amazon Web Services KMS Encryption Context to use for
// object encryption. The value of this header is a base64-encoded UTF-8 string
- // holding JSON with the encryption context key-value pairs. This functionality is
- // not supported for directory buckets.
+ // holding JSON with the encryption context key-value pairs.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSEncryptionContext *string
// If present, indicates the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. This functionality
- // is not supported for directory buckets.
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when you store this object in Amazon
- // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side
- // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+ // S3 (for example, AES256 , aws:kms ).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// ID for the initiated multipart upload.
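Continuing that sketch, a few of the output fields documented above can be inspected after initiating the upload; this fragment assumes the client, ctx, and input from the earlier sketches plus the standard log and time packages.

```go
// AbortDate is only returned for general purpose buckets with a matching lifecycle rule.
out, err := client.CreateMultipartUpload(ctx, input)
if err != nil {
	return err
}
log.Printf("upload %s uses %s", aws.ToString(out.UploadId), out.ServerSideEncryption)
if out.AbortDate != nil {
	log.Printf("becomes abortable on %s under rule %s",
		out.AbortDate.Format(time.RFC3339), aws.ToString(out.AbortRuleId))
}
```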
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go
index e2d5a007d1..96f821d29c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go
@@ -17,62 +17,72 @@ import (
// Creates a session that establishes temporary security credentials to support
// fast authentication and authorization for the Zonal endpoint APIs on directory
// buckets. For more information about Zonal endpoint APIs that include the
-// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html)
-// in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory
-// bucket, use the CreateSession API operation. Specifically, you grant
-// s3express:CreateSession permission to a bucket in a bucket policy or an IAM
-// identity-based policy. Then, you use IAM credentials to make the CreateSession
-// API request on the bucket, which returns temporary security credentials that
-// include the access key ID, secret access key, session token, and expiration.
-// These credentials have associated permissions to access the Zonal endpoint APIs.
-// After the session is created, you don’t need to use other policies to grant
-// permissions to each Zonal endpoint API individually. Instead, in your Zonal
-// endpoint API requests, you sign your requests by applying the temporary security
-// credentials of the session to the request headers and following the SigV4
-// protocol for authentication. You also apply the session token to the
-// x-amz-s3session-token request header for authorization. Temporary security
-// credentials are scoped to the bucket and expire after 5 minutes. After the
-// expiration time, any calls that you make with those credentials will fail. You
-// must use IAM credentials again to make a CreateSession API request that
-// generates a new set of temporary credentials for use. Temporary credentials
-// cannot be extended or refreshed beyond the original specified interval. If you
-// use Amazon Web Services SDKs, SDKs handle the session token refreshes
+// Availability Zone in the request endpoint, see [S3 Express One Zone APIs]in the Amazon S3 User Guide.
+//
+// To make Zonal endpoint API requests on a directory bucket, use the CreateSession
+// API operation. Specifically, you grant s3express:CreateSession permission to a
+// bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM
+// credentials to make the CreateSession API request on the bucket, which returns
+// temporary security credentials that include the access key ID, secret access
+// key, session token, and expiration. These credentials have associated
+// permissions to access the Zonal endpoint APIs. After the session is created, you
+// don’t need to use other policies to grant permissions to each Zonal endpoint API
+// individually. Instead, in your Zonal endpoint API requests, you sign your
+// requests by applying the temporary security credentials of the session to the
+// request headers and following the SigV4 protocol for authentication. You also
+// apply the session token to the x-amz-s3session-token request header for
+// authorization. Temporary security credentials are scoped to the bucket and
+// expire after 5 minutes. After the expiration time, any calls that you make with
+// those credentials will fail. You must use IAM credentials again to make a
+// CreateSession API request that generates a new set of temporary credentials for
+// use. Temporary credentials cannot be extended or refreshed beyond the original
+// specified interval.
+//
+// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes
// automatically to avoid service interruptions when a session expires. We
// recommend that you use the Amazon Web Services SDKs to initiate and manage
-// requests to the CreateSession API. For more information, see Performance
-// guidelines and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication)
-// in the Amazon S3 User Guide.
+// requests to the CreateSession API. For more information, see [Performance guidelines and design patterns]in the Amazon S3
+// User Guide.
+//
// - You must make requests for this API operation to the Zonal endpoint. These
// endpoints support virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests
-// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide.
+// are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User Guide.
+//
// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject
// API operation doesn't use the temporary security credentials returned from the
// CreateSession API operation for authentication and authorization. For
// information about authentication and authorization of the CopyObject API
-// operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
-// .
+// operation on directory buckets, see [CopyObject].
+//
// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket
// API operation doesn't use the temporary security credentials returned from the
// CreateSession API operation for authentication and authorization. For
// information about authentication and authorization of the HeadBucket API
-// operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html)
-// .
+// operation on directory buckets, see [HeadBucket].
//
// Permissions To obtain temporary security credentials, you must create a bucket
// policy or an IAM identity-based policy that grants s3express:CreateSession
// permission to the bucket. In a policy, you can have the s3express:SessionMode
// condition key to control who can create a ReadWrite or ReadOnly session. For
-// more information about ReadWrite or ReadOnly sessions, see
-// x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters)
-// . For example policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
-// and Amazon Web Services Identity and Access Management (IAM) identity-based
-// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
-// in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint
-// APIs, the bucket policy should also grant both accounts the
-// s3express:CreateSession permission. HTTP Host header syntax Directory buckets -
-// The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com .
+// more information about ReadWrite or ReadOnly sessions, see
+// [x-amz-create-session-mode]. For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3
+// User Guide.
+//
+// To grant cross-account access to Zonal endpoint APIs, the bucket policy should
+// also grant both accounts the s3express:CreateSession permission.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+// [Performance guidelines and design patterns]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+// [x-amz-create-session-mode]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters
+// [S3 Express One Zone APIs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html
+// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html
func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) {
if params == nil {
params = &CreateSessionInput{}
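The session flow described in the comment above can also be exercised directly, although the SDK normally creates and refreshes these credentials for you. A minimal sketch, assuming default credentials are configured and using a placeholder directory bucket name in the required bucket_base_name--az_id--x-s3 format:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Request temporary, bucket-scoped credentials for a directory bucket.
	// The bucket name is a placeholder.
	out, err := client.CreateSession(context.TODO(), &s3.CreateSessionInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET--usw2-az1--x-s3"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The returned credentials are scoped to the bucket and expire after
	// about five minutes, after which a new CreateSession call is needed.
	log.Printf("session expires at %v", aws.ToTime(out.Credentials.Expiration))
}
```

In practice the SDK's Zonal endpoint operations perform this exchange and the token refresh automatically, as the documentation above notes.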
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
index 30e1381bd6..e3a25ec195 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
@@ -15,33 +15,43 @@ import (
// Deletes the S3 bucket. All objects (including all object versions and delete
// markers) in the bucket must be deleted before the bucket itself can be deleted.
+//
// - Directory buckets - If multipart uploads in a directory bucket are in
// progress, you can't delete the bucket until all the in-progress multipart
// uploads are aborted or completed.
+//
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Regional endpoint. These endpoints support path-style
// requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
-// Virtual-hosted-style requests aren't supported. For more information, see
-// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide.
+// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints] in
+// the Amazon S3 User Guide.
//
// Permissions
+//
// - General purpose bucket permissions - You must have the s3:DeleteBucket
// permission on the specified bucket in a policy.
+//
// - Directory bucket permissions - You must have the s3express:DeleteBucket
// permission in an IAM identity-based policy instead of a bucket policy.
// Cross-account access to this API operation isn't supported. This operation can
// only be performed by the Amazon Web Services account that owns the resource. For
-// more information about directory bucket policies and permissions, see Amazon
-// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
-// in the Amazon S3 User Guide.
+// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the
+// Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
+//
+// The following operations are related to DeleteBucket :
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// s3express-control.region.amazonaws.com . The following operations are related to
-// DeleteBucket :
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+// [CreateBucket]
+//
+// [DeleteObject]
+//
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) {
if params == nil {
params = &DeleteBucketInput{}
@@ -59,24 +69,29 @@ func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, op
type DeleteBucketInput struct {
- // Specifies the bucket being deleted. Directory buckets - When you use this
- // operation with a directory bucket, you must use path-style requests in the
- // format https://s3express-control.region_code.amazonaws.com/bucket-name .
+ // Specifies the bucket being deleted.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide
+ // ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User
+ // Guide
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
//
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
- // status code 403 Forbidden (access denied). For directory buckets, this header
- // is not supported in this API operation. If you specify this header, the request
- // fails with the HTTP status code 501 Not Implemented .
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
ExpectedBucketOwner *string
noSmithyDocumentSerde
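As a usage sketch for the DeleteBucket documentation above (assuming an already configured *s3.Client; the bucket name and account ID are placeholders):

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteEmptyBucket deletes a bucket that has already been emptied of objects,
// versions, and delete markers. ExpectedBucketOwner makes the request fail with
// 403 Forbidden if the bucket is owned by a different account; directory
// buckets reject this header with 501 Not Implemented.
func deleteEmptyBucket(ctx context.Context, client *s3.Client, bucket, ownerAccountID string) error {
	_, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket:              aws.String(bucket),
		ExpectedBucketOwner: aws.String(ownerAccountID),
	})
	return err
}
```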
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go
index 0033825a0a..b486fe62cb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go
@@ -13,20 +13,32 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Deletes an analytics
-// configuration for the bucket (specified by the analytics configuration ID). To
-// use this operation, you must have permissions to perform the
+// This operation is not supported by directory buckets.
+//
+// Deletes an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// To use this operation, you must have permissions to perform the
// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
-// information about permissions, see Permissions Related to Bucket Subresource
-// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . For information about the Amazon S3 analytics feature, see Amazon S3
-// Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
-// . The following operations are related to DeleteBucketAnalyticsConfiguration :
-// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
-// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
-// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis].
+//
+// The following operations are related to DeleteBucketAnalyticsConfiguration :
+//
+// [GetBucketAnalyticsConfiguration]
+//
+// [ListBucketAnalyticsConfigurations]
+//
+// [PutBucketAnalyticsConfiguration]
+//
+// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html
+// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html
+// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) {
if params == nil {
params = &DeleteBucketAnalyticsConfigurationInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go
index d465826fb4..ef3abb1256 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go
@@ -13,14 +13,25 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Deletes the cors
-// configuration information set for the bucket. To use this operation, you must
-// have permission to perform the s3:PutBucketCORS action. The bucket owner has
-// this permission by default and can grant this permission to others. For
-// information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
-// in the Amazon S3 User Guide. Related Resources
-// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
-// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
+// This operation is not supported by directory buckets.
+//
+// Deletes the cors configuration information set for the bucket.
+//
+// To use this operation, you must have permission to perform the s3:PutBucketCORS
+// action. The bucket owner has this permission by default and can grant this
+// permission to others.
+//
+// For information about cors , see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide.
+//
+// # Related Resources
+//
+// [PutBucketCors]
+//
+// [RESTOPTIONSobject]
+//
+// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html
+// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html
func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) {
if params == nil {
params = &DeleteBucketCorsInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go
index 7be8c47590..fceb944c9c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go
@@ -13,20 +13,29 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. This implementation of
-// the DELETE action resets the default encryption for the bucket as server-side
-// encryption with Amazon S3 managed keys (SSE-S3). For information about the
-// bucket default encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
-// in the Amazon S3 User Guide. To use this operation, you must have permissions to
-// perform the s3:PutEncryptionConfiguration action. The bucket owner has this
-// permission by default. The bucket owner can grant this permission to others. For
-// more information about permissions, see Permissions Related to Bucket
-// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide. The following operations are related to
-// DeleteBucketEncryption :
-// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html)
-// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
+// This operation is not supported by directory buckets.
+//
+// This implementation of the DELETE action resets the default encryption for the
+// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). For
+// information about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption] in the Amazon S3
+// User Guide.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutEncryptionConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
+// The following operations are related to DeleteBucketEncryption :
+//
+// [PutBucketEncryption]
+//
+// [GetBucketEncryption]
+//
+// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html
+// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html
func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) {
if params == nil {
params = &DeleteBucketEncryptionInput{}
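A short sketch of the call documented above; removing the configuration falls back to SSE-S3 rather than leaving objects unencrypted (the client is assumed to be already configured):

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// resetDefaultEncryption deletes the bucket's default-encryption configuration,
// which resets it to server-side encryption with Amazon S3 managed keys (SSE-S3).
func resetDefaultEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	return err
}
```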
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go
index 734d23b043..b7baf48abc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go
@@ -13,25 +13,38 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Deletes the S3
-// Intelligent-Tiering configuration from the specified bucket. The S3
-// Intelligent-Tiering storage class is designed to optimize storage costs by
-// automatically moving data to the most cost-effective storage access tier,
+// This operation is not supported by directory buckets.
+//
+// Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
+//
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
// without performance impact or operational overhead. S3 Intelligent-Tiering
// delivers automatic cost savings in three low latency and high throughput access
// tiers. To get the lowest storage cost on data that can be accessed in minutes to
-// hours, you can choose to activate additional archiving capabilities. The S3
-// Intelligent-Tiering storage class is the ideal storage class for data with
-// unknown, changing, or unpredictable access patterns, independent of object size
-// or retention period. If the size of an object is less than 128 KB, it is not
-// monitored and not eligible for auto-tiering. Smaller objects can be stored, but
-// they are always charged at the Frequent Access tier rates in the S3
-// Intelligent-Tiering storage class. For more information, see Storage class for
-// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)
-// . Operations related to DeleteBucketIntelligentTieringConfiguration include:
-// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
-// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html)
-// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html)
+// hours, you can choose to activate additional archiving capabilities.
+//
+// The S3 Intelligent-Tiering storage class is the ideal storage class for data
+// with unknown, changing, or unpredictable access patterns, independent of object
+// size or retention period. If the size of an object is less than 128 KB, it is
+// not monitored and not eligible for auto-tiering. Smaller objects can be stored,
+// but they are always charged at the Frequent Access tier rates in the S3
+// Intelligent-Tiering storage class.
+//
+// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// Operations related to DeleteBucketIntelligentTieringConfiguration include:
+//
+// [GetBucketIntelligentTieringConfiguration]
+//
+// [PutBucketIntelligentTieringConfiguration]
+//
+// [ListBucketIntelligentTieringConfigurations]
+//
+// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html
+// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html
+// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) {
if params == nil {
params = &DeleteBucketIntelligentTieringConfigurationInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go
index 3b8d81a439..ef1d8a24bf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go
@@ -13,18 +13,32 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Deletes an inventory
-// configuration (identified by the inventory ID) from the bucket. To use this
-// operation, you must have permissions to perform the s3:PutInventoryConfiguration
-// action. The bucket owner has this permission by default. The bucket owner can
-// grant this permission to others. For more information about permissions, see
-// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
-// . Operations related to DeleteBucketInventoryConfiguration include:
-// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
-// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
-// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
+// This operation is not supported by directory buckets.
+//
+// Deletes an inventory configuration (identified by the inventory ID) from the
+// bucket.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutInventoryConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory].
+//
+// Operations related to DeleteBucketInventoryConfiguration include:
+//
+// [GetBucketInventoryConfiguration]
+//
+// [PutBucketInventoryConfiguration]
+//
+// [ListBucketInventoryConfigurations]
+//
+// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html
+// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html
+// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html
func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) {
if params == nil {
params = &DeleteBucketInventoryConfigurationInput{}
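Because the inventory configuration is identified by its ID, the delete call takes both the bucket and that ID. A minimal sketch (the ID value is a placeholder):

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteInventoryConfig removes one inventory configuration, leaving any other
// inventory configurations on the bucket untouched.
func deleteInventoryConfig(ctx context.Context, client *s3.Client, bucket, inventoryID string) error {
	_, err := client.DeleteBucketInventoryConfiguration(ctx, &s3.DeleteBucketInventoryConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(inventoryID),
	})
	return err
}
```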
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go
index 88928b284a..ea6455f669 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go
@@ -13,20 +13,32 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Deletes the lifecycle
-// configuration from the specified bucket. Amazon S3 removes all the lifecycle
-// configuration rules in the lifecycle subresource associated with the bucket.
-// Your objects never expire, and Amazon S3 no longer automatically deletes any
-// objects on the basis of rules contained in the deleted lifecycle configuration.
+// This operation is not supported by directory buckets.
+//
+// Deletes the lifecycle configuration from the specified bucket. Amazon S3
+// removes all the lifecycle configuration rules in the lifecycle subresource
+// associated with the bucket. Your objects never expire, and Amazon S3 no longer
+// automatically deletes any objects on the basis of rules contained in the deleted
+// lifecycle configuration.
+//
// To use this operation, you must have permission to perform the
// s3:PutLifecycleConfiguration action. By default, the bucket owner has this
-// permission and the bucket owner can grant this permission to others. There is
-// usually some time lag before lifecycle configuration deletion is fully
-// propagated to all the Amazon S3 systems. For more information about the object
-// expiration, see Elements to Describe Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions)
-// . Related actions include:
-// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
-// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+// permission and the bucket owner can grant this permission to others.
+//
+// There is usually some time lag before lifecycle configuration deletion is fully
+// propagated to all the Amazon S3 systems.
+//
+// For more information about the object expiration, see [Elements to Describe Lifecycle Actions].
+//
+// Related actions include:
+//
+// [PutBucketLifecycleConfiguration]
+//
+// [GetBucketLifecycleConfiguration]
+//
+// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+// [Elements to Describe Lifecycle Actions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions
+// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) {
if params == nil {
params = &DeleteBucketLifecycleInput{}
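A minimal sketch of the lifecycle deletion described above; note the documented propagation lag, so existing rules may continue to apply briefly after the call succeeds:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteLifecycleRules removes all lifecycle rules from the bucket, so Amazon S3
// stops expiring or transitioning objects based on the deleted configuration.
func deleteLifecycleRules(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketLifecycle(ctx, &s3.DeleteBucketLifecycleInput{
		Bucket: aws.String(bucket),
	})
	return err
}
```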
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go
index 21384351f5..ca0158b6bf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go
@@ -13,22 +13,35 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Deletes a metrics
-// configuration for the Amazon CloudWatch request metrics (specified by the
-// metrics configuration ID) from the bucket. Note that this doesn't include the
-// daily storage metrics. To use this operation, you must have permissions to
-// perform the s3:PutMetricsConfiguration action. The bucket owner has this
-// permission by default. The bucket owner can grant this permission to others. For
-// more information about permissions, see Permissions Related to Bucket
-// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . For information about CloudWatch request metrics for Amazon S3, see
-// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
-// . The following operations are related to DeleteBucketMetricsConfiguration :
-// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
-// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
-// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
-// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
+// This operation is not supported by directory buckets.
+//
+// Deletes a metrics configuration for the Amazon CloudWatch request metrics
+// (specified by the metrics configuration ID) from the bucket. Note that this
+// doesn't include the daily storage metrics.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutMetricsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch].
+//
+// The following operations are related to DeleteBucketMetricsConfiguration :
+//
+// [GetBucketMetricsConfiguration]
+//
+// [PutBucketMetricsConfiguration]
+//
+// [ListBucketMetricsConfigurations]
+//
+// [Monitoring Metrics with Amazon CloudWatch]
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html
+// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) {
if params == nil {
params = &DeleteBucketMetricsConfigurationInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go
index 4beac6b092..a2e89e4054 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go
@@ -13,14 +13,22 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Removes OwnershipControls
-// for an Amazon S3 bucket. To use this operation, you must have the
-// s3:PutBucketOwnershipControls permission. For more information about Amazon S3
-// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html)
-// . The following operations are related to DeleteBucketOwnershipControls :
-// - GetBucketOwnershipControls
-// - PutBucketOwnershipControls
+// This operation is not supported by directory buckets.
+//
+// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you
+// must have the s3:PutBucketOwnershipControls permission. For more information
+// about Amazon S3 permissions, see [Specifying Permissions in a Policy].
+//
+// For information about Amazon S3 Object Ownership, see [Using Object Ownership].
+//
+// The following operations are related to DeleteBucketOwnershipControls :
+//
+// # GetBucketOwnershipControls
+//
+// # PutBucketOwnershipControls
+//
+// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) {
if params == nil {
params = &DeleteBucketOwnershipControlsInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go
index b8e1f56a14..745890828b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go
@@ -13,44 +13,57 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Deletes the policy of a specified bucket. Directory buckets - For directory
-// buckets, you must make requests for this API operation to the Regional endpoint.
-// These endpoints support path-style requests in the format
-// https://s3express-control.region_code.amazonaws.com/bucket-name .
-// Virtual-hosted-style requests aren't supported. For more information, see
-// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions If you are using an identity other than
-// the root user of the Amazon Web Services account that owns the bucket, the
-// calling identity must both have the DeleteBucketPolicy permissions on the
-// specified bucket and belong to the bucket owner's account in order to use this
-// operation. If you don't have DeleteBucketPolicy permissions, Amazon S3 returns
-// a 403 Access Denied error. If you have the correct permissions, but you're not
-// using an identity that belongs to the bucket owner's account, Amazon S3 returns
-// a 405 Method Not Allowed error. To ensure that bucket owners don't
-// inadvertently lock themselves out of their own buckets, the root principal in a
-// bucket owner's Amazon Web Services account can perform the GetBucketPolicy ,
-// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket
-// policy explicitly denies the root principal's access. Bucket owner root
-// principals can only be blocked from performing these API actions by VPC endpoint
-// policies and Amazon Web Services Organizations policies.
+// Deletes the policy of a specified bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region_code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints] in
+// the Amazon S3 User Guide.
+//
+// Permissions If you are using an identity other than the root user of the Amazon
+// Web Services account that owns the bucket, the calling identity must both have
+// the DeleteBucketPolicy permissions on the specified bucket and belong to the
+// bucket owner's account in order to use this operation.
+//
+// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403
+// Access Denied error. If you have the correct permissions, but you're not using
+// an identity that belongs to the bucket owner's account, Amazon S3 returns a 405
+// Method Not Allowed error.
+//
+// To ensure that bucket owners don't inadvertently lock themselves out of their
+// own buckets, the root principal in a bucket owner's Amazon Web Services account
+// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API
+// actions, even if their bucket policy explicitly denies the root principal's
+// access. Bucket owner root principals can only be blocked from performing these
+// API actions by VPC endpoint policies and Amazon Web Services Organizations
+// policies.
+//
// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is
// required in a policy. For more information about general purpose buckets bucket
-// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
-// in the Amazon S3 User Guide.
+// policies, see [Using Bucket Policies and User Policies] in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - To grant access to this API operation, you
// must have the s3express:DeleteBucketPolicy permission in an IAM identity-based
// policy instead of a bucket policy. Cross-account access to this API operation
// isn't supported. This operation can only be performed by the Amazon Web Services
// account that owns the resource. For more information about directory bucket
-// policies and permissions, see Amazon Web Services Identity and Access
-// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
-// in the Amazon S3 User Guide.
+// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// s3express-control.region.amazonaws.com . The following operations are related to
-// DeleteBucketPolicy
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+// # The following operations are related to DeleteBucketPolicy
+//
+// [CreateBucket]
+//
+// [DeleteObject]
+//
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) {
if params == nil {
params = &DeleteBucketPolicyInput{}
@@ -68,24 +81,29 @@ func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPol
type DeleteBucketPolicyInput struct {
- // The bucket name. Directory buckets - When you use this operation with a
- // directory bucket, you must use path-style requests in the format
+ // The bucket name.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide
+ // ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User
+ // Guide
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
//
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
- // status code 403 Forbidden (access denied). For directory buckets, this header
- // is not supported in this API operation. If you specify this header, the request
- // fails with the HTTP status code 501 Not Implemented .
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
ExpectedBucketOwner *string
noSmithyDocumentSerde
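A sketch that pairs the call with the two failure modes called out above (403 when the caller lacks DeleteBucketPolicy permission, 405 when the caller is outside the bucket owner's account); the error inspection uses the standard smithy-go API error interface:

```go
package example

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// deleteBucketPolicy removes the bucket policy and logs the service error code
// (for example AccessDenied or MethodNotAllowed) when the request is rejected.
func deleteBucketPolicy(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{
		Bucket: aws.String(bucket),
	})
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		log.Printf("DeleteBucketPolicy failed: %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
	}
	return err
}
```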
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go
index 9fdc6bcf32..58f91e97f6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go
@@ -13,18 +13,32 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Deletes the replication
-// configuration from the bucket. To use this operation, you must have permissions
-// to perform the s3:PutReplicationConfiguration action. The bucket owner has
-// these permissions by default and can grant it to others. For more information
-// about permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . It can take a while for the deletion of a replication configuration to fully
-// propagate. For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
-// in the Amazon S3 User Guide. The following operations are related to
-// DeleteBucketReplication :
-// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
-// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html)
+// This operation is not supported by directory buckets.
+//
+// Deletes the replication configuration from the bucket.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutReplicationConfiguration action. The bucket owner has these permissions by
+// default and can grant it to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations]
+// and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// It can take a while for the deletion of a replication configuration to fully
+// propagate.
+//
+// For information about replication configuration, see [Replication] in the Amazon S3 User
+// Guide.
+//
+// The following operations are related to DeleteBucketReplication :
+//
+// [PutBucketReplication]
+//
+// [GetBucketReplication]
+//
+// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
+// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) {
if params == nil {
params = &DeleteBucketReplicationInput{}
@@ -42,7 +56,7 @@ func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBuck
type DeleteBucketReplicationInput struct {
- // The bucket name.
+ // The bucket name.
//
// This member is required.
Bucket *string
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go
index ae737d40a3..8f3386904f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go
@@ -13,13 +13,22 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Deletes the tags from the
-// bucket. To use this operation, you must have permission to perform the
+// This operation is not supported by directory buckets.
+//
+// Deletes the tags from the bucket.
+//
+// To use this operation, you must have permission to perform the
// s3:PutBucketTagging action. By default, the bucket owner has this permission and
-// can grant this permission to others. The following operations are related to
-// DeleteBucketTagging :
-// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html)
-// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html)
+// can grant this permission to others.
+//
+// The following operations are related to DeleteBucketTagging :
+//
+// [GetBucketTagging]
+//
+// [PutBucketTagging]
+//
+// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html
+// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html
func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) {
if params == nil {
params = &DeleteBucketTaggingInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go
index 425936dfac..893cda5052 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go
@@ -13,20 +13,31 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. This action removes the
-// website configuration for a bucket. Amazon S3 returns a 200 OK response upon
-// successfully deleting a website configuration on the specified bucket. You will
-// get a 200 OK response if the website configuration you are trying to delete
-// does not exist on the bucket. Amazon S3 returns a 404 response if the bucket
-// specified in the request does not exist. This DELETE action requires the
-// S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete
-// the website configuration attached to a bucket. However, bucket owners can grant
-// other users permission to delete the website configuration by writing a bucket
-// policy granting them the S3:DeleteBucketWebsite permission. For more
-// information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
-// . The following operations are related to DeleteBucketWebsite :
-// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html)
-// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+// This operation is not supported by directory buckets.
+//
+// This action removes the website configuration for a bucket. Amazon S3 returns a
+// 200 OK response upon successfully deleting a website configuration on the
+// specified bucket. You will get a 200 OK response if the website configuration
+// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
+// response if the bucket specified in the request does not exist.
+//
+// This DELETE action requires the S3:DeleteBucketWebsite permission. By default,
+// only the bucket owner can delete the website configuration attached to a bucket.
+// However, bucket owners can grant other users permission to delete the website
+// configuration by writing a bucket policy granting them the
+// S3:DeleteBucketWebsite permission.
+//
+// For more information about hosting websites, see [Hosting Websites on Amazon S3].
+//
+// The following operations are related to DeleteBucketWebsite :
+//
+// [GetBucketWebsite]
+//
+// [PutBucketWebsite]
+//
+// [GetBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html
+// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html
+// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) {
if params == nil {
params = &DeleteBucketWebsiteInput{}
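A final sketch for the website-configuration removal; as documented above, a 200 OK (a nil error here) is also returned when no website configuration existed:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// removeWebsiteConfig deletes the static website configuration from the bucket.
// A nil error does not guarantee a configuration was present, only that none
// remains afterwards.
func removeWebsiteConfig(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketWebsite(ctx, &s3.DeleteBucketWebsiteInput{
		Bucket: aws.String(bucket),
	})
	return err
}
```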
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go
index c1e5ff73a9..778d02e54c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go
@@ -22,9 +22,7 @@ import (
// - If bucket versioning is enabled, the operation inserts a delete marker,
// which becomes the current version of the object. To permanently delete an object
// in a versioned bucket, you must include the object’s versionId in the request.
-// For more information about versioning-enabled buckets, see Deleting object
-// versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html)
-// .
+// For more information about versioning-enabled buckets, see [Deleting object versions from a versioning-enabled bucket].
//
// - If bucket versioning is suspended, the operation removes the object that
// has a null versionId , if there is one, and inserts a delete marker that
@@ -32,9 +30,7 @@ import (
// versionId , and all versions of the object have a versionId , Amazon S3 does
// not remove the object and only inserts a delete marker. To permanently delete an
// object that has a versionId , you must include the object’s versionId in the
-// request. For more information about versioning-suspended buckets, see
-// Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html)
-// .
+// request. For more information about versioning-suspended buckets, see [Deleting objects from versioning-suspended buckets].
//
// - Directory buckets - S3 Versioning isn't enabled and supported for directory
// buckets. For this API operation, only the null value of the version ID is
@@ -45,37 +41,43 @@ import (
// API operation to the Zonal endpoint. These endpoints support
// virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name .
-// Path-style requests are not supported. For more information, see Regional and
-// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide.
+// Path-style requests are not supported. For more information, see [Regional and Zonal endpoints] in the
+// Amazon S3 User Guide.
//
// To remove a specific version, you must use the versionId query parameter. Using
// this query parameter permanently deletes the version. If the object deleted is a
// delete marker, Amazon S3 sets the response header x-amz-delete-marker to true.
+//
// If the object you want to delete is in a bucket where the bucket versioning
// configuration is MFA Delete enabled, you must include the x-amz-mfa request
// header in the DELETE versionId request. Requests that include x-amz-mfa must
-// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html)
-// in the Amazon S3 User Guide. To see sample requests that use versioning, see
-// Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete)
-// . Directory buckets - MFA delete is not supported by directory buckets. You can
-// delete objects by explicitly calling DELETE Object or calling (
-// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
-// ) to enable Amazon S3 to remove them for you. If you want to block users or
-// accounts from removing or deleting objects from your bucket, you must deny them
-// the s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration
-// actions. Directory buckets - S3 Lifecycle is not supported by directory buckets.
+// use HTTPS. For more information about MFA Delete, see [Using MFA Delete]in the Amazon S3 User
+// Guide. To see sample requests that use versioning, see [Sample Request].
+//
+// Directory buckets - MFA delete is not supported by directory buckets.
+//
+// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle] ) to
+// enable Amazon S3 to remove them for you. If you want to block users or accounts
+// from removing or deleting objects from your bucket, you must deny them the
+// s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration
+// actions.
+//
+// Directory buckets - S3 Lifecycle is not supported by directory buckets.
+//
// Permissions
+//
// - General purpose bucket permissions - The following permissions are required
// in your policies when your DeleteObjects request includes specific headers.
+//
// - s3:DeleteObject - To delete an object from a bucket, you must always have
// the s3:DeleteObject permission.
+//
// - s3:DeleteObjectVersion - To delete a specific version of an object from a
-// versioning-enabled bucket, you must have the s3:DeleteObjectVersion
-// permission.
+// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -83,13 +85,23 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession]CreateSession .
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following action is related to DeleteObject :
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following action is
-// related to DeleteObject :
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+// [PutObject]
+//
+// [Sample Request]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [Deleting objects from versioning-suspended buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [Deleting object versions from a versioning-enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html
+// [Using MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html
func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) {
if params == nil {
params = &DeleteObjectInput{}
@@ -107,31 +119,39 @@ func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, op
type DeleteObjectInput struct {
- // The bucket name of the bucket containing the object. Directory buckets - When
- // you use this operation with a directory bucket, you must use
- // virtual-hosted-style requests in the format
+ // The bucket name of the bucket containing the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -143,8 +163,9 @@ type DeleteObjectInput struct {
// Indicates whether S3 Object Lock should bypass Governance-mode restrictions to
// process this operation. To use this header, you must have the
- // s3:BypassGovernanceRetention permission. This functionality is not supported for
- // directory buckets.
+ // s3:BypassGovernanceRetention permission.
+ //
+ // This functionality is not supported for directory buckets.
BypassGovernanceRetention *bool
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -155,22 +176,27 @@ type DeleteObjectInput struct {
// The concatenation of the authentication device's serial number, a space, and
// the value that is displayed on your authentication device. Required to
// permanently delete a versioned object if versioning is configured with MFA
- // delete enabled. This functionality is not supported for directory buckets.
+ // delete enabled.
+ //
+ // This functionality is not supported for directory buckets.
MFA *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Version ID used to reference a specific version of the object. For directory
- // buckets in this API operation, only the null value of the version ID is
- // supported.
+ // Version ID used to reference a specific version of the object.
+ //
+ // For directory buckets in this API operation, only the null value of the version
+ // ID is supported.
VersionId *string
noSmithyDocumentSerde
@@ -187,16 +213,21 @@ type DeleteObjectOutput struct {
// Indicates whether the specified object version that was permanently deleted was
// (true) or was not (false) a delete marker before deletion. In a simple DELETE,
// this header indicates whether (true) or not (false) the current version of the
- // object is a delete marker. This functionality is not supported for directory
- // buckets.
+ // object is a delete marker.
+ //
+ // This functionality is not supported for directory buckets.
DeleteMarker *bool
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Returns the version ID of the delete marker created as a result of the DELETE
- // operation. This functionality is not supported for directory buckets.
+ // operation.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
// Metadata pertaining to the operation's result.
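For reference, a minimal sketch of calling the DeleteObject API documented above with aws-sdk-go-v2 (the bucket and key names are placeholders and the default credential chain is assumed):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// On a versioning-enabled bucket this inserts a delete marker; set VersionId
	// in the input to permanently delete a specific object version instead.
	out, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:    aws.String("example-object-key"),  // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleteMarker=%v versionId=%s", aws.ToBool(out.DeleteMarker), aws.ToString(out.VersionId))
}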
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go
index c5f31dec66..7bb534eb1c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go
@@ -12,16 +12,27 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Removes the entire tag
-// set from the specified object. For more information about managing object tags,
-// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html)
-// . To use this operation, you must have permission to perform the
-// s3:DeleteObjectTagging action. To delete tags of a specific object version, add
-// the versionId query parameter in the request. You will need permission for the
-// s3:DeleteObjectVersionTagging action. The following operations are related to
-// DeleteObjectTagging :
-// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
-// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+// This operation is not supported by directory buckets.
+//
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see [Object Tagging].
+//
+// To use this operation, you must have permission to perform the
+// s3:DeleteObjectTagging action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteObjectTagging :
+//
+// [PutObjectTagging]
+//
+// [GetObjectTagging]
+//
+// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
+// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html
+// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) {
if params == nil {
params = &DeleteObjectTaggingInput{}
@@ -39,23 +50,27 @@ func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTa
type DeleteObjectTaggingInput struct {
- // The bucket name containing the objects from which to remove the tags. Access
- // points - When you use this action with an access point, you must provide the
- // alias of the access point in place of the bucket name or specify the access
+ // The bucket name containing the objects from which to remove the tags.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
// point ARN. When using the access point ARN, you must direct requests to the
// access point hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with
- // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
- // The S3 on Outposts hostname takes the form
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go
index 05f82cf756..45b823138a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go
@@ -17,47 +17,57 @@ import (
// This operation enables you to delete multiple objects from a bucket using a
// single HTTP request. If you know the object keys that you want to delete, then
// this operation provides a suitable alternative to sending individual delete
-// requests, reducing per-request overhead. The request can contain a list of up to
-// 1000 keys that you want to delete. In the XML, you provide the object key names,
-// and optionally, version IDs if you want to delete a specific version of the
-// object from a versioning-enabled bucket. For each key, Amazon S3 performs a
-// delete operation and returns the result of that delete, success or failure, in
-// the response. Note that if the object specified in the request is not found,
-// Amazon S3 returns the result as deleted.
+// requests, reducing per-request overhead.
+//
+// The request can contain a list of up to 1000 keys that you want to delete. In
+// the XML, you provide the object key names, and optionally, version IDs if you
+// want to delete a specific version of the object from a versioning-enabled
+// bucket. For each key, Amazon S3 performs a delete operation and returns the
+// result of that delete, success or failure, in the response. Note that if the
+// object specified in the request is not found, Amazon S3 returns the result as
+// deleted.
+//
// - Directory buckets - S3 Versioning isn't enabled and supported for directory
// buckets.
+//
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Zonal endpoint. These endpoints support
// virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name .
-// Path-style requests are not supported. For more information, see Regional and
-// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide.
+// Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the
+// Amazon S3 User Guide.
//
// The operation supports two modes for the response: verbose and quiet. By
// default, the operation uses verbose mode in which the response includes the
// result of deletion of each key in your request. In quiet mode the response
// includes only keys where the delete operation encountered an error. For a
// successful deletion in a quiet mode, the operation does not return any
-// information about the delete in the response body. When performing this action
-// on an MFA Delete enabled bucket, that attempts to delete any versioned objects,
-// you must include an MFA token. If you do not provide one, the entire request
-// will fail, even if there are non-versioned objects you are trying to delete. If
-// you provide an invalid token, whether there are versioned keys in the request or
-// not, the entire Multi-Object Delete request will fail. For information about MFA
-// Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
-// in the Amazon S3 User Guide. Directory buckets - MFA delete is not supported by
-// directory buckets. Permissions
+// information about the delete in the response body.
+//
+// When performing this action on an MFA Delete enabled bucket, that attempts to
+// delete any versioned objects, you must include an MFA token. If you do not
+// provide one, the entire request will fail, even if there are non-versioned
+// objects you are trying to delete. If you provide an invalid token, whether there
+// are versioned keys in the request or not, the entire Multi-Object Delete request
+// will fail. For information about MFA Delete, see [MFA Delete]in the Amazon S3 User Guide.
+//
+// Directory buckets - MFA delete is not supported by directory buckets.
+//
+// Permissions
+//
// - General purpose bucket permissions - The following permissions are required
// in your policies when your DeleteObjects request includes specific headers.
+//
// - s3:DeleteObject - To delete an object from a bucket, you must always specify
// the s3:DeleteObject permission.
+//
// - s3:DeleteObjectVersion - To delete a specific version of an object from a
// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
// permission.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -65,26 +75,42 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession]CreateSession .
//
// Content-MD5 request header
+//
// - General purpose bucket - The Content-MD5 request header is required for all
// Multi-Object Delete requests. Amazon S3 uses the header value to ensure that
// your request body has not been altered in transit.
+//
+// - Directory bucket - The Content-MD5 request header or an additional checksum
// request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c ,
// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all
// Multi-Object Delete requests.
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to DeleteObjects :
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following operations are related to DeleteObjects :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [ListParts]
+//
+// [AbortMultipartUpload]
+//
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) {
if params == nil {
params = &DeleteObjectsInput{}
@@ -102,31 +128,39 @@ func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput,
type DeleteObjectsInput struct {
- // The bucket name containing the objects to delete. Directory buckets - When you
- // use this operation with a directory bucket, you must use virtual-hosted-style
- // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
- // Path-style requests are not supported. Directory bucket names must be unique in
- // the chosen Availability Zone. Bucket names must follow the format
- // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // The bucket name containing the objects to delete.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
+ // supported. Directory bucket names must be unique in the chosen Availability
+ // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
+ // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
+ // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -138,28 +172,40 @@ type DeleteObjectsInput struct {
// Specifies whether you want to delete this object even if it has a
// Governance-type Object Lock in place. To use this header, you must have the
- // s3:BypassGovernanceRetention permission. This functionality is not supported for
- // directory buckets.
+ // s3:BypassGovernanceRetention permission.
+ //
+ // This functionality is not supported for directory buckets.
BypassGovernanceRetention *bool
// Indicates the algorithm used to create the checksum for the object when you use
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3
- // fails the request with the HTTP status code 400 Bad Request . For the
- // x-amz-checksum-algorithm header, replace algorithm with the supported
- // algorithm from the following list:
+ // fails the request with the HTTP status code 400 Bad Request .
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the
+ // supported algorithm from the following list:
+ //
// - CRC32
+ //
// - CRC32C
+ //
// - SHA1
+ //
// - SHA256
- // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If the individual checksum value you provide
- // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set
- // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided
- // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the
- // provided value in x-amz-checksum-algorithm . If you provide an individual
- // checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter.
+ //
+ // For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through
+ // x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter and uses the checksum algorithm that matches the provided value in
+ // x-amz-checksum-algorithm .
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -170,25 +216,31 @@ type DeleteObjectsInput struct {
// The concatenation of the authentication device's serial number, a space, and
// the value that is displayed on your authentication device. Required to
// permanently delete a versioned object if versioning is configured with MFA
- // delete enabled. When performing the DeleteObjects operation on an MFA delete
- // enabled bucket, which attempts to delete the specified versioned objects, you
- // must include an MFA token. If you don't provide an MFA token, the entire request
- // will fail, even if there are non-versioned objects that you are trying to
- // delete. If you provide an invalid token, whether there are versioned object keys
- // in the request or not, the entire Multi-Object Delete request will fail. For
- // information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // delete enabled.
+ //
+ // When performing the DeleteObjects operation on an MFA delete enabled bucket,
+ // which attempts to delete the specified versioned objects, you must include an
+ // MFA token. If you don't provide an MFA token, the entire request will fail, even
+ // if there are non-versioned objects that you are trying to delete. If you provide
+ // an invalid token, whether there are versioned object keys in the request or not,
+ // the entire Multi-Object Delete request will fail. For information about MFA
+ // Delete, see [MFA Delete]in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete
MFA *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
noSmithyDocumentSerde
@@ -210,7 +262,9 @@ type DeleteObjectsOutput struct {
Errors []types.Error
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
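A hedged sketch of a Multi-Object Delete call against the DeleteObjects API described above; the package and helper name (deleteMany) are illustrative, and the keys are supplied by the caller:

package s3examples

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// deleteMany sends one DeleteObjects request (the API accepts up to 1000 keys per request).
func deleteMany(ctx context.Context, client *s3.Client, bucket string, keys []string) error {
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}
	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &types.Delete{Objects: ids},
	})
	if err != nil {
		return err
	}
	// Verbose mode (the default) reports per-key results; surface any failures.
	for _, e := range out.Errors {
		log.Printf("delete %s failed: %s", aws.ToString(e.Key), aws.ToString(e.Message))
	}
	return nil
}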
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go
index 43969e2b19..2e77386c0c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go
@@ -13,17 +13,28 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Removes the
-// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation,
-// you must have the s3:PutBucketPublicAccessBlock permission. For more
-// information about permissions, see Permissions Related to Bucket Subresource
-// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . The following operations are related to DeletePublicAccessBlock :
-// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
-// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
-// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
-// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html)
+// This operation is not supported by directory buckets.
+//
+// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
+// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For
+// more information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// The following operations are related to DeletePublicAccessBlock :
+//
+// [Using Amazon S3 Block Public Access]
+//
+// [GetPublicAccessBlock]
+//
+// [PutPublicAccessBlock]
+//
+// [GetBucketPolicyStatus]
+//
+// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) {
if params == nil {
params = &DeletePublicAccessBlockInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go
index 4bb1ff71cf..be91d27b9f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go
@@ -14,26 +14,36 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. This implementation of
-// the GET action uses the accelerate subresource to return the Transfer
-// Acceleration state of a bucket, which is either Enabled or Suspended . Amazon S3
-// Transfer Acceleration is a bucket-level feature that enables you to perform
-// faster data transfers to and from Amazon S3. To use this operation, you must
-// have permission to perform the s3:GetAccelerateConfiguration action. The bucket
-// owner has this permission by default. The bucket owner can grant this permission
-// to others. For more information about permissions, see Permissions Related to
-// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide. You set the Transfer Acceleration state of an
-// existing bucket to Enabled or Suspended by using the
-// PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html)
-// operation. A GET accelerate request does not return a state value for a bucket
-// that has no transfer acceleration state. A bucket has no Transfer Acceleration
-// state if a state has never been set on the bucket. For more information about
-// transfer acceleration, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
-// in the Amazon S3 User Guide. The following operations are related to
-// GetBucketAccelerateConfiguration :
-// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html)
+// This operation is not supported by directory buckets.
+//
+// This implementation of the GET action uses the accelerate subresource to return
+// the Transfer Acceleration state of a bucket, which is either Enabled or
+// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that
+// enables you to perform faster data transfers to and from Amazon S3.
+//
+// To use this operation, you must have permission to perform the
+// s3:GetAccelerateConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
+// You set the Transfer Acceleration state of an existing bucket to Enabled or
+// Suspended by using the [PutBucketAccelerateConfiguration] operation.
+//
+// A GET accelerate request does not return a state value for a bucket that has no
+// transfer acceleration state. A bucket has no Transfer Acceleration state if a
+// state has never been set on the bucket.
+//
+// For more information about transfer acceleration, see [Transfer Acceleration] in the Amazon S3 User
+// Guide.
+//
+// The following operations are related to GetBucketAccelerateConfiguration :
+//
+// [PutBucketAccelerateConfiguration]
+//
+// [PutBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) {
if params == nil {
params = &GetBucketAccelerateConfigurationInput{}
@@ -65,10 +75,12 @@ type GetBucketAccelerateConfigurationInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
noSmithyDocumentSerde
@@ -82,7 +94,9 @@ func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointP
type GetBucketAccelerateConfigurationOutput struct {
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// The accelerate configuration of the bucket.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go
index bc7c4ea18b..06e4ad39f2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go
@@ -14,26 +14,35 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. This implementation of
-// the GET action uses the acl subresource to return the access control list (ACL)
-// of a bucket. To use GET to return the ACL of the bucket, you must have the
-// READ_ACP access to the bucket. If READ_ACP permission is granted to the
-// anonymous user, you can return the ACL of the bucket without using an
-// authorization header. When you use this API operation with an access point,
-// provide the alias of the access point in place of the bucket name. When you use
-// this API operation with an Object Lambda access point, provide the alias of the
-// Object Lambda access point in place of the bucket name. If the Object Lambda
-// access point alias in a request is not valid, the error code
+// This operation is not supported by directory buckets.
+//
+// This implementation of the GET action uses the acl subresource to return the
+// access control list (ACL) of a bucket. To use GET to return the ACL of the
+// bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission
+// is granted to the anonymous user, you can return the ACL of the bucket without
+// using an authorization header.
+//
+// When you use this API operation with an access point, provide the alias of the
+// access point in place of the bucket name.
+//
+// When you use this API operation with an Object Lambda access point, provide the
+// alias of the Object Lambda access point in place of the bucket name. If the
+// Object Lambda access point alias in a request is not valid, the error code
// InvalidAccessPointAliasError is returned. For more information about
-// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
-// . If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// InvalidAccessPointAliasError , see [List of Error Codes].
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
// requests to read ACLs are still supported and return the
// bucket-owner-full-control ACL with the owner being the account that created the
-// bucket. For more information, see Controlling object ownership and disabling
-// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide. The following operations are related to
-// GetBucketAcl :
-// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html)
+// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide.
+//
+// The following operations are related to GetBucketAcl :
+//
+// [ListObjects]
+//
+// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) {
if params == nil {
params = &GetBucketAclInput{}
@@ -51,14 +60,18 @@ func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, op
type GetBucketAclInput struct {
- // Specifies the S3 bucket whose ACL is being requested. When you use this API
- // operation with an access point, provide the alias of the access point in place
- // of the bucket name. When you use this API operation with an Object Lambda access
- // point, provide the alias of the Object Lambda access point in place of the
- // bucket name. If the Object Lambda access point alias in a request is not valid,
- // the error code InvalidAccessPointAliasError is returned. For more information
- // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
- // .
+ // Specifies the S3 bucket whose ACL is being requested.
+ //
+ // When you use this API operation with an access point, provide the alias of the
+ // access point in place of the bucket name.
+ //
+ // When you use this API operation with an Object Lambda access point, provide the
+ // alias of the Object Lambda access point in place of the bucket name. If the
+ // Object Lambda access point alias in a request is not valid, the error code
+ // InvalidAccessPointAliasError is returned. For more information about
+ // InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
//
// This member is required.
Bucket *string
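A similarly illustrative sketch for the GetBucketAcl operation documented above; the package and function names are placeholders, and grant handling is deliberately simplified:

package s3examples

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketACL lists the grants on a general purpose bucket (requires READ_ACP access).
func printBucketACL(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketAcl(ctx, &s3.GetBucketAclInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.Owner != nil {
		log.Printf("owner: %s", aws.ToString(out.Owner.ID))
	}
	for _, g := range out.Grants {
		log.Printf("grant: %s", g.Permission)
	}
	return nil
}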
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go
index 64e41d4037..6f52c81571 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go
@@ -14,21 +14,33 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. This implementation of
-// the GET action returns an analytics configuration (identified by the analytics
-// configuration ID) from the bucket. To use this operation, you must have
-// permissions to perform the s3:GetAnalyticsConfiguration action. The bucket
-// owner has this permission by default. The bucket owner can grant this permission
-// to others. For more information about permissions, see Permissions Related to
-// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide. For information about Amazon S3 analytics feature,
-// see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
-// in the Amazon S3 User Guide. The following operations are related to
-// GetBucketAnalyticsConfiguration :
-// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
-// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
-// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
+// This operation is not supported by directory buckets.
+//
+// This implementation of the GET action returns an analytics configuration
+// (identified by the analytics configuration ID) from the bucket.
+//
+// To use this operation, you must have permissions to perform the
+// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
+// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis] in the Amazon S3 User
+// Guide.
+//
+// The following operations are related to GetBucketAnalyticsConfiguration :
+//
+// [DeleteBucketAnalyticsConfiguration]
+//
+// [ListBucketAnalyticsConfigurations]
+//
+// [PutBucketAnalyticsConfiguration]
+//
+// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html
+// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) {
if params == nil {
params = &GetBucketAnalyticsConfigurationInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go
index 0997225ebc..d5db578e55 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go
@@ -14,21 +14,36 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the Cross-Origin
-// Resource Sharing (CORS) configuration information set for the bucket. To use
-// this operation, you must have permission to perform the s3:GetBucketCORS
+// This operation is not supported by directory buckets.
+//
+// Returns the Cross-Origin Resource Sharing (CORS) configuration information set
+// for the bucket.
+//
+// To use this operation, you must have permission to perform the s3:GetBucketCORS
// action. By default, the bucket owner has this permission and can grant it to
-// others. When you use this API operation with an access point, provide the alias
-// of the access point in place of the bucket name. When you use this API operation
-// with an Object Lambda access point, provide the alias of the Object Lambda
-// access point in place of the bucket name. If the Object Lambda access point
-// alias in a request is not valid, the error code InvalidAccessPointAliasError is
-// returned. For more information about InvalidAccessPointAliasError , see List of
-// Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
-// . For more information about CORS, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
-// . The following operations are related to GetBucketCors :
-// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
-// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
+// others.
+//
+// When you use this API operation with an access point, provide the alias of the
+// access point in place of the bucket name.
+//
+// When you use this API operation with an Object Lambda access point, provide the
+// alias of the Object Lambda access point in place of the bucket name. If the
+// Object Lambda access point alias in a request is not valid, the error code
+// InvalidAccessPointAliasError is returned. For more information about
+// InvalidAccessPointAliasError , see [List of Error Codes].
+//
+// For more information about CORS, see [Enabling Cross-Origin Resource Sharing].
+//
+// The following operations are related to GetBucketCors :
+//
+// [PutBucketCors]
+//
+// [DeleteBucketCors]
+//
+// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html
+// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html
func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) {
if params == nil {
params = &GetBucketCorsInput{}
@@ -46,14 +61,18 @@ func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput,
type GetBucketCorsInput struct {
- // The bucket name for which to get the cors configuration. When you use this API
- // operation with an access point, provide the alias of the access point in place
- // of the bucket name. When you use this API operation with an Object Lambda access
- // point, provide the alias of the Object Lambda access point in place of the
- // bucket name. If the Object Lambda access point alias in a request is not valid,
- // the error code InvalidAccessPointAliasError is returned. For more information
- // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
- // .
+ // The bucket name for which to get the cors configuration.
+ //
+ // When you use this API operation with an access point, provide the alias of the
+ // access point in place of the bucket name.
+ //
+ // When you use this API operation with an Object Lambda access point, provide the
+ // alias of the Object Lambda access point in place of the bucket name. If the
+ // Object Lambda access point alias in a request is not valid, the error code
+ // InvalidAccessPointAliasError is returned. For more information about
+ // InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
//
// This member is required.
Bucket *string
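
For orientation, a minimal sketch of calling the reworked GetBucketCors operation from application code, using the canonical github.com/aws/aws-sdk-go-v2 module paths rather than the mirrored vendor paths above; the bucket name and the default credential/Region chain are illustrative assumptions, not part of this change:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // default credential and Region chain
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// "example-bucket" is a placeholder; an access point alias may also be passed here.
	out, err := client.GetBucketCors(ctx, &s3.GetBucketCorsInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, rule := range out.CORSRules {
		fmt.Println(rule.AllowedOrigins, rule.AllowedMethods)
	}
}
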
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go
index 22c1f9bb51..7e8d251e35 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go
@@ -14,20 +14,29 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the default
-// encryption configuration for an Amazon S3 bucket. By default, all buckets have a
-// default encryption configuration that uses server-side encryption with Amazon S3
-// managed keys (SSE-S3). For information about the bucket default encryption
-// feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
-// in the Amazon S3 User Guide. To use this operation, you must have permission to
-// perform the s3:GetEncryptionConfiguration action. The bucket owner has this
-// permission by default. The bucket owner can grant this permission to others. For
-// more information about permissions, see Permissions Related to Bucket
-// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . The following operations are related to GetBucketEncryption :
-// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html)
-// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html)
+// This operation is not supported by directory buckets.
+//
+// Returns the default encryption configuration for an Amazon S3 bucket. By
+// default, all buckets have a default encryption configuration that uses
+// server-side encryption with Amazon S3 managed keys (SSE-S3). For information
+// about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption] in the Amazon S3 User Guide.
+//
+// To use this operation, you must have permission to perform the
+// s3:GetEncryptionConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// The following operations are related to GetBucketEncryption :
+//
+// [PutBucketEncryption]
+//
+// [DeleteBucketEncryption]
+//
+// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html
+// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) {
if params == nil {
params = &GetBucketEncryptionInput{}
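
A rough sketch of reading the default encryption algorithm out of the response described above (SSE-S3 unless the owner configured otherwise); it assumes an already-constructed *s3.Client and uses a placeholder package name:

package s3examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// defaultEncryption returns the default server-side encryption algorithm
// configured on the bucket (SSE-S3 unless the owner has changed it).
func defaultEncryption(ctx context.Context, client *s3.Client, bucket string) (types.ServerSideEncryption, error) {
	out, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if rule.ApplyServerSideEncryptionByDefault != nil {
			return rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm, nil
		}
	}
	return "", fmt.Errorf("bucket %s has no default encryption rule", bucket)
}
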
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go
index f3ae88a9b2..97f54f343b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go
@@ -14,25 +14,38 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Gets the S3
-// Intelligent-Tiering configuration from the specified bucket. The S3
-// Intelligent-Tiering storage class is designed to optimize storage costs by
-// automatically moving data to the most cost-effective storage access tier,
+// This operation is not supported by directory buckets.
+//
+// Gets the S3 Intelligent-Tiering configuration from the specified bucket.
+//
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
// without performance impact or operational overhead. S3 Intelligent-Tiering
// delivers automatic cost savings in three low latency and high throughput access
// tiers. To get the lowest storage cost on data that can be accessed in minutes to
-// hours, you can choose to activate additional archiving capabilities. The S3
-// Intelligent-Tiering storage class is the ideal storage class for data with
-// unknown, changing, or unpredictable access patterns, independent of object size
-// or retention period. If the size of an object is less than 128 KB, it is not
-// monitored and not eligible for auto-tiering. Smaller objects can be stored, but
-// they are always charged at the Frequent Access tier rates in the S3
-// Intelligent-Tiering storage class. For more information, see Storage class for
-// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)
-// . Operations related to GetBucketIntelligentTieringConfiguration include:
-// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
-// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html)
-// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html)
+// hours, you can choose to activate additional archiving capabilities.
+//
+// The S3 Intelligent-Tiering storage class is the ideal storage class for data
+// with unknown, changing, or unpredictable access patterns, independent of object
+// size or retention period. If the size of an object is less than 128 KB, it is
+// not monitored and not eligible for auto-tiering. Smaller objects can be stored,
+// but they are always charged at the Frequent Access tier rates in the S3
+// Intelligent-Tiering storage class.
+//
+// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// Operations related to GetBucketIntelligentTieringConfiguration include:
+//
+// [DeleteBucketIntelligentTieringConfiguration]
+//
+// [PutBucketIntelligentTieringConfiguration]
+//
+// [ListBucketIntelligentTieringConfigurations]
+//
+// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html
+// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html
func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) {
if params == nil {
params = &GetBucketIntelligentTieringConfigurationInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go
index 123218e976..48d361e10b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go
@@ -14,18 +14,32 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns an inventory
-// configuration (identified by the inventory configuration ID) from the bucket. To
-// use this operation, you must have permissions to perform the
+// This operation is not supported by directory buckets.
+//
+// Returns an inventory configuration (identified by the inventory configuration
+// ID) from the bucket.
+//
+// To use this operation, you must have permissions to perform the
// s3:GetInventoryConfiguration action. The bucket owner has this permission by
// default and can grant this permission to others. For more information about
-// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
-// . The following operations are related to GetBucketInventoryConfiguration :
-// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
-// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
-// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
+// permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory].
+//
+// The following operations are related to GetBucketInventoryConfiguration :
+//
+// [DeleteBucketInventoryConfiguration]
+//
+// [ListBucketInventoryConfigurations]
+//
+// [PutBucketInventoryConfiguration]
+//
+// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html
+// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html
func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) {
if params == nil {
params = &GetBucketInventoryConfigurationInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go
index ddcbbe0967..f4590e59a5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go
@@ -14,35 +14,51 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Bucket lifecycle
-// configuration now supports specifying a lifecycle rule using an object key name
-// prefix, one or more object tags, object size, or any combination of these.
-// Accordingly, this section describes the latest API. The previous version of the
-// API supported filtering based only on an object key name prefix, which is
-// supported for backward compatibility. For the related API description, see
-// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
-// . Accordingly, this section describes the latest API. The response describes the
-// new filter element that you can use to specify a filter to select a subset of
-// objects to which the rule applies. If you are using a previous version of the
-// lifecycle configuration, it still works. For the earlier action, Returns the
-// lifecycle configuration information set on the bucket. For information about
-// lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
-// . To use this operation, you must have permission to perform the
+// This operation is not supported by directory buckets.
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, object size, or any
+// combination of these. Accordingly, this section describes the latest API. The
+// previous version of the API supported filtering based only on an object key
+// name prefix, which is supported for backward compatibility; for the related
+// API description, see [GetBucketLifecycle]. The response describes the new
+// filter element that you can use to specify a filter to select a subset of
+// objects to which the rule applies. If you are using a previous version of the
+// lifecycle configuration, it still works.
+//
+// Returns the lifecycle configuration information set on the bucket. For
+// information about lifecycle configuration, see [Object Lifecycle Management].
+//
+// To use this operation, you must have permission to perform the
// s3:GetLifecycleConfiguration action. The bucket owner has this permission, by
// default. The bucket owner can grant this permission to others. For more
-// information about permissions, see Permissions Related to Bucket Subresource
-// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . GetBucketLifecycleConfiguration has the following special error:
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// GetBucketLifecycleConfiguration has the following special error:
+//
// - Error code: NoSuchLifecycleConfiguration
+//
// - Description: The lifecycle configuration does not exist.
+//
// - HTTP Status Code: 404 Not Found
+//
// - SOAP Fault Code Prefix: Client
//
// The following operations are related to GetBucketLifecycleConfiguration :
-// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
-// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
-// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
+//
+// [GetBucketLifecycle]
+//
+// [PutBucketLifecycle]
+//
+// [DeleteBucketLifecycle]
+//
+// [GetBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html
+// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) {
if params == nil {
params = &GetBucketLifecycleConfigurationInput{}
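
Since the doc comment above calls out the NoSuchLifecycleConfiguration special error, here is a hedged sketch of treating that 404 as "no rules" by inspecting the smithy-go APIError code; client construction is assumed as in the earlier GetBucketCors sketch:

package s3examples

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go"
)

// lifecycleRules fetches the bucket's lifecycle rules, treating the
// NoSuchLifecycleConfiguration error as "no rules" rather than a failure.
func lifecycleRules(ctx context.Context, client *s3.Client, bucket string) ([]types.LifecycleRule, error) {
	out, err := client.GetBucketLifecycleConfiguration(ctx, &s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchLifecycleConfiguration" {
			return nil, nil // 404 Not Found: the bucket simply has no lifecycle configuration
		}
		return nil, err
	}
	return out.Rules, nil
}
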
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go
index aff5f3cd55..a6b362cbe1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go
@@ -20,23 +20,34 @@ import (
"io"
)
-// This operation is not supported by directory buckets. Returns the Region the
-// bucket resides in. You set the bucket's Region using the LocationConstraint
-// request parameter in a CreateBucket request. For more information, see
-// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// . When you use this API operation with an access point, provide the alias of the
-// access point in place of the bucket name. When you use this API operation with
-// an Object Lambda access point, provide the alias of the Object Lambda access
-// point in place of the bucket name. If the Object Lambda access point alias in a
-// request is not valid, the error code InvalidAccessPointAliasError is returned.
-// For more information about InvalidAccessPointAliasError , see List of Error
-// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
-// . We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html)
-// to return the Region that a bucket resides in. For backward compatibility,
-// Amazon S3 continues to support GetBucketLocation. The following operations are
-// related to GetBucketLocation :
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+// This operation is not supported by directory buckets.
+//
+// Returns the Region the bucket resides in. You set the bucket's Region using the
+// LocationConstraint request parameter in a CreateBucket request. For more
+// information, see [CreateBucket].
+//
+// When you use this API operation with an access point, provide the alias of the
+// access point in place of the bucket name.
+//
+// When you use this API operation with an Object Lambda access point, provide the
+// alias of the Object Lambda access point in place of the bucket name. If the
+// Object Lambda access point alias in a request is not valid, the error code
+// InvalidAccessPointAliasError is returned. For more information about
+// InvalidAccessPointAliasError , see [List of Error Codes].
+//
+// We recommend that you use [HeadBucket] to return the Region that a bucket resides in. For
+// backward compatibility, Amazon S3 continues to support GetBucketLocation.
+//
+// The following operations are related to GetBucketLocation :
+//
+// [GetObject]
+//
+// [CreateBucket]
+//
+// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html
func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) {
if params == nil {
params = &GetBucketLocationInput{}
@@ -54,14 +65,18 @@ func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocatio
type GetBucketLocationInput struct {
- // The name of the bucket for which to get the location. When you use this API
- // operation with an access point, provide the alias of the access point in place
- // of the bucket name. When you use this API operation with an Object Lambda access
- // point, provide the alias of the Object Lambda access point in place of the
- // bucket name. If the Object Lambda access point alias in a request is not valid,
- // the error code InvalidAccessPointAliasError is returned. For more information
- // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
- // .
+ // The name of the bucket for which to get the location.
+ //
+ // When you use this API operation with an access point, provide the alias of the
+ // access point in place of the bucket name.
+ //
+ // When you use this API operation with an Object Lambda access point, provide the
+ // alias of the Object Lambda access point in place of the bucket name. If the
+ // Object Lambda access point alias in a request is not valid, the error code
+ // InvalidAccessPointAliasError is returned. For more information about
+ // InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
//
// This member is required.
Bucket *string
@@ -82,8 +97,10 @@ func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) {
type GetBucketLocationOutput struct {
// Specifies the Region where the bucket resides. For a list of all the Amazon S3
- // supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
- // . Buckets in Region us-east-1 have a LocationConstraint of null .
+ // supported location constraints by Region, see [Regions and Endpoints]. Buckets in Region us-east-1
+ // have a LocationConstraint of null .
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
LocationConstraint types.BucketLocationConstraint
// Metadata pertaining to the operation's result.
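
The LocationConstraint note above (null for us-east-1) is easy to trip over; a small illustrative helper follows, with the us-east-1 mapping as the only added logic:

package s3examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// bucketRegion resolves the bucket's Region. Buckets in us-east-1 come back
// with an empty (null) LocationConstraint, so that case is mapped explicitly.
func bucketRegion(ctx context.Context, client *s3.Client, bucket string) (string, error) {
	out, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	if out.LocationConstraint == "" {
		return "us-east-1", nil
	}
	return string(out.LocationConstraint), nil
}
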
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go
index d1c4f8fbb9..ae6bcbe686 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go
@@ -14,11 +14,19 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the logging
-// status of a bucket and the permissions users have to view and modify that
-// status. The following operations are related to GetBucketLogging :
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html)
+// This operation is not supported by directory buckets.
+//
+// Returns the logging status of a bucket and the permissions users have to view
+// and modify that status.
+//
+// The following operations are related to GetBucketLogging :
+//
+// [CreateBucket]
+//
+// [PutBucketLogging]
+//
+// [PutBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) {
if params == nil {
params = &GetBucketLoggingInput{}
@@ -57,8 +65,10 @@ func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) {
type GetBucketLoggingOutput struct {
// Describes where logs are stored and the prefix that Amazon S3 assigns to all
- // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
- // in the Amazon S3 API Reference.
+ // log object keys for a bucket. For more information, see [PUT Bucket logging] in the Amazon S3 API
+ // Reference.
+ //
+ // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html
LoggingEnabled *types.LoggingEnabled
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go
index d7499c68cf..474eba5208 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go
@@ -14,21 +14,34 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Gets a metrics
-// configuration (specified by the metrics configuration ID) from the bucket. Note
-// that this doesn't include the daily storage metrics. To use this operation, you
-// must have permissions to perform the s3:GetMetricsConfiguration action. The
-// bucket owner has this permission by default. The bucket owner can grant this
-// permission to others. For more information about permissions, see Permissions
-// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . For information about CloudWatch request metrics for Amazon S3, see
-// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
-// . The following operations are related to GetBucketMetricsConfiguration :
-// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
-// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
-// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
-// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
+// This operation is not supported by directory buckets.
+//
+// Gets a metrics configuration (specified by the metrics configuration ID) from
+// the bucket. Note that this doesn't include the daily storage metrics.
+//
+// To use this operation, you must have permissions to perform the
+// s3:GetMetricsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch].
+//
+// The following operations are related to GetBucketMetricsConfiguration :
+//
+// [PutBucketMetricsConfiguration]
+//
+// [DeleteBucketMetricsConfiguration]
+//
+// [ListBucketMetricsConfigurations]
+//
+// [Monitoring Metrics with Amazon CloudWatch]
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html
+// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) {
if params == nil {
params = &GetBucketMetricsConfigurationInput{}
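
Because this operation is keyed by both the bucket and a configuration ID, a brief sketch of the two-parameter lookup; both identifiers are placeholders:

package s3examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// metricsConfiguration looks up one CloudWatch request-metrics configuration
// by its ID; daily storage metrics are not included, per the note above.
func metricsConfiguration(ctx context.Context, client *s3.Client, bucket, configID string) (*types.MetricsConfiguration, error) {
	out, err := client.GetBucketMetricsConfiguration(ctx, &s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(configID),
	})
	if err != nil {
		return nil, err
	}
	return out.MetricsConfiguration, nil
}
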
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go
index 73155110d8..81c0d815ae 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go
@@ -14,24 +14,38 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the notification
-// configuration of a bucket. If notifications are not enabled on the bucket, the
-// action returns an empty NotificationConfiguration element. By default, you must
-// be the bucket owner to read the notification configuration of a bucket. However,
-// the bucket owner can use a bucket policy to grant permission to other users to
-// read this configuration with the s3:GetBucketNotification permission. When you
-// use this API operation with an access point, provide the alias of the access
-// point in place of the bucket name. When you use this API operation with an
-// Object Lambda access point, provide the alias of the Object Lambda access point
-// in place of the bucket name. If the Object Lambda access point alias in a
-// request is not valid, the error code InvalidAccessPointAliasError is returned.
-// For more information about InvalidAccessPointAliasError , see List of Error
-// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
-// . For more information about setting and reading the notification configuration
-// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-// . For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
-// . The following action is related to GetBucketNotification :
-// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html)
+// This operation is not supported by directory buckets.
+//
+// Returns the notification configuration of a bucket.
+//
+// If notifications are not enabled on the bucket, the action returns an empty
+// NotificationConfiguration element.
+//
+// By default, you must be the bucket owner to read the notification configuration
+// of a bucket. However, the bucket owner can use a bucket policy to grant
+// permission to other users to read this configuration with the
+// s3:GetBucketNotification permission.
+//
+// When you use this API operation with an access point, provide the alias of the
+// access point in place of the bucket name.
+//
+// When you use this API operation with an Object Lambda access point, provide the
+// alias of the Object Lambda access point in place of the bucket name. If the
+// Object Lambda access point alias in a request is not valid, the error code
+// InvalidAccessPointAliasError is returned. For more information about
+// InvalidAccessPointAliasError , see [List of Error Codes].
+//
+// For more information about setting and reading the notification configuration
+// on a bucket, see [Setting Up Notification of Bucket Events]. For more information about bucket policies, see [Using Bucket Policies].
+//
+// The following action is related to GetBucketNotification :
+//
+// [PutBucketNotification]
+//
+// [Using Bucket Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [Setting Up Notification of Bucket Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+// [PutBucketNotification]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html
func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) {
if params == nil {
params = &GetBucketNotificationConfigurationInput{}
@@ -49,15 +63,18 @@ func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params
type GetBucketNotificationConfigurationInput struct {
- // The name of the bucket for which to get the notification configuration. When
- // you use this API operation with an access point, provide the alias of the access
- // point in place of the bucket name. When you use this API operation with an
- // Object Lambda access point, provide the alias of the Object Lambda access point
- // in place of the bucket name. If the Object Lambda access point alias in a
- // request is not valid, the error code InvalidAccessPointAliasError is returned.
- // For more information about InvalidAccessPointAliasError , see List of Error
- // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
- // .
+ // The name of the bucket for which to get the notification configuration.
+ //
+ // When you use this API operation with an access point, provide the alias of the
+ // access point in place of the bucket name.
+ //
+ // When you use this API operation with an Object Lambda access point, provide the
+ // alias of the Object Lambda access point in place of the bucket name. If the
+ // Object Lambda access point alias in a request is not valid, the error code
+ // InvalidAccessPointAliasError is returned. For more information about
+ // InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
//
// This member is required.
Bucket *string
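
A short sketch showing the Bucket field accepting either a bucket name or an access point alias, as the parameter documentation above describes, and listing any configured SNS topics; the argument name is illustrative:

package s3examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// notificationTopics lists the SNS topic ARNs configured on the bucket.
// bucketOrAlias may be a bucket name or an access point alias.
func notificationTopics(ctx context.Context, client *s3.Client, bucketOrAlias string) ([]string, error) {
	out, err := client.GetBucketNotificationConfiguration(ctx, &s3.GetBucketNotificationConfigurationInput{
		Bucket: aws.String(bucketOrAlias),
	})
	if err != nil {
		return nil, err
	}
	var topics []string
	for _, tc := range out.TopicConfigurations {
		topics = append(topics, aws.ToString(tc.TopicArn))
	}
	return topics, nil
}
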
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go
index cea1514286..2b601e1b6e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go
@@ -14,14 +14,22 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Retrieves
-// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have
-// the s3:GetBucketOwnershipControls permission. For more information about Amazon
-// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html)
-// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// . The following operations are related to GetBucketOwnershipControls :
-// - PutBucketOwnershipControls
-// - DeleteBucketOwnershipControls
+// This operation is not supported by directory buckets.
+//
+// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you
+// must have the s3:GetBucketOwnershipControls permission. For more information
+// about Amazon S3 permissions, see [Specifying permissions in a policy].
+//
+// For information about Amazon S3 Object Ownership, see [Using Object Ownership].
+//
+// The following operations are related to GetBucketOwnershipControls :
+//
+// # PutBucketOwnershipControls
+//
+// # DeleteBucketOwnershipControls
+//
+// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html
func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) {
if params == nil {
params = &GetBucketOwnershipControlsInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go
index c2f98f9369..29934518e6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go
@@ -13,47 +13,61 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Returns the policy of a specified bucket. Directory buckets - For directory
-// buckets, you must make requests for this API operation to the Regional endpoint.
-// These endpoints support path-style requests in the format
-// https://s3express-control.region_code.amazonaws.com/bucket-name .
-// Virtual-hosted-style requests aren't supported. For more information, see
-// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions If you are using an identity other than
-// the root user of the Amazon Web Services account that owns the bucket, the
-// calling identity must both have the GetBucketPolicy permissions on the
-// specified bucket and belong to the bucket owner's account in order to use this
-// operation. If you don't have GetBucketPolicy permissions, Amazon S3 returns a
-// 403 Access Denied error. If you have the correct permissions, but you're not
-// using an identity that belongs to the bucket owner's account, Amazon S3 returns
-// a 405 Method Not Allowed error. To ensure that bucket owners don't
-// inadvertently lock themselves out of their own buckets, the root principal in a
-// bucket owner's Amazon Web Services account can perform the GetBucketPolicy ,
-// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket
-// policy explicitly denies the root principal's access. Bucket owner root
-// principals can only be blocked from performing these API actions by VPC endpoint
-// policies and Amazon Web Services Organizations policies.
+// Returns the policy of a specified bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region_code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints] in
+// the Amazon S3 User Guide.
+//
+// Permissions If you are using an identity other than the root user of the Amazon
+// Web Services account that owns the bucket, the calling identity must both have
+// the GetBucketPolicy permissions on the specified bucket and belong to the
+// bucket owner's account in order to use this operation.
+//
+// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access
+// Denied error. If you have the correct permissions, but you're not using an
+// identity that belongs to the bucket owner's account, Amazon S3 returns a 405
+// Method Not Allowed error.
+//
+// To ensure that bucket owners don't inadvertently lock themselves out of their
+// own buckets, the root principal in a bucket owner's Amazon Web Services account
+// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API
+// actions, even if their bucket policy explicitly denies the root principal's
+// access. Bucket owner root principals can only be blocked from performing these
+// API actions by VPC endpoint policies and Amazon Web Services Organizations
+// policies.
+//
// - General purpose bucket permissions - The s3:GetBucketPolicy permission is
// required in a policy. For more information about general purpose buckets bucket
-// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
-// in the Amazon S3 User Guide.
+// policies, see [Using Bucket Policies and User Policies] in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - To grant access to this API operation, you
// must have the s3express:GetBucketPolicy permission in an IAM identity-based
// policy instead of a bucket policy. Cross-account access to this API operation
// isn't supported. This operation can only be performed by the Amazon Web Services
// account that owns the resource. For more information about directory bucket
-// policies and permissions, see Amazon Web Services Identity and Access
-// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
-// in the Amazon S3 User Guide.
+// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples]
+// in the Amazon S3 User Guide.
+//
+// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
//
-// Example bucket policies General purpose buckets example bucket policies - See
-// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html)
-// in the Amazon S3 User Guide. Directory bucket example bucket policies - See
-// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
-// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The
-// HTTP Host header syntax is s3express-control.region.amazonaws.com . The
-// following action is related to GetBucketPolicy :
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+// The following action is related to GetBucketPolicy :
+//
+// [GetObject]
+//
+// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) {
if params == nil {
params = &GetBucketPolicyInput{}
@@ -71,33 +85,42 @@ func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInp
type GetBucketPolicyInput struct {
- // The bucket name to get the bucket policy for. Directory buckets - When you use
- // this operation with a directory bucket, you must use path-style requests in the
- // format https://s3express-control.region_code.amazonaws.com/bucket-name .
+ // The bucket name to get the bucket policy for.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide Access points - When you use this API operation with
- // an access point, provide the alias of the access point in place of the bucket
- // name. Object Lambda access points - When you use this API operation with an
- // Object Lambda access point, provide the alias of the Object Lambda access point
- // in place of the bucket name. If the Object Lambda access point alias in a
- // request is not valid, the error code InvalidAccessPointAliasError is returned.
- // For more information about InvalidAccessPointAliasError , see List of Error
- // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
- // . Access points and Object Lambda access points are not supported by directory
+ // ). For information about bucket naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User
+ // Guide.
+ //
+ // Access points - When you use this API operation with an access point, provide
+ // the alias of the access point in place of the bucket name.
+ //
+ // Object Lambda access points - When you use this API operation with an Object
+ // Lambda access point, provide the alias of the Object Lambda access point in
+ // place of the bucket name. If the Object Lambda access point alias in a request
+ // is not valid, the error code InvalidAccessPointAliasError is returned. For more
+ // information about InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // Access points and Object Lambda access points are not supported by directory
// buckets.
//
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+ //
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
- // status code 403 Forbidden (access denied). For directory buckets, this header
- // is not supported in this API operation. If you specify this header, the request
- // fails with the HTTP status code 501 Not Implemented .
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
ExpectedBucketOwner *string
noSmithyDocumentSerde
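
To illustrate the ExpectedBucketOwner behavior documented above (403 Forbidden on account mismatch, not supported for directory buckets), a minimal helper; the account ID parameter is a placeholder:

package s3examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// bucketPolicyJSON fetches the bucket policy document. Setting
// ExpectedBucketOwner makes the call fail with 403 Forbidden if the bucket
// belongs to a different account.
func bucketPolicyJSON(ctx context.Context, client *s3.Client, bucket, ownerAccountID string) (string, error) {
	out, err := client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
		Bucket:              aws.String(bucket),
		ExpectedBucketOwner: aws.String(ownerAccountID),
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Policy), nil
}
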
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go
index cb36ac504a..52fa5be9dd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go
@@ -14,18 +14,31 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Retrieves the policy
-// status for an Amazon S3 bucket, indicating whether the bucket is public. In
-// order to use this operation, you must have the s3:GetBucketPolicyStatus
-// permission. For more information about Amazon S3 permissions, see Specifying
-// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// . For more information about when Amazon S3 considers a bucket public, see The
-// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
-// . The following operations are related to GetBucketPolicyStatus :
-// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
-// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
-// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
-// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+// This operation is not supported by directory buckets.
+//
+// Retrieves the policy status for an Amazon S3 bucket, indicating whether the
+// bucket is public. In order to use this operation, you must have the
+// s3:GetBucketPolicyStatus permission. For more information about Amazon S3
+// permissions, see [Specifying Permissions in a Policy].
+//
+// For more information about when Amazon S3 considers a bucket public, see [The Meaning of "Public"].
+//
+// The following operations are related to GetBucketPolicyStatus :
+//
+// [Using Amazon S3 Block Public Access]
+//
+// [GetPublicAccessBlock]
+//
+// [PutPublicAccessBlock]
+//
+// [DeletePublicAccessBlock]
+//
+// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html
+// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) {
if params == nil {
params = &GetBucketPolicyStatusInput{}
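
A tentative sketch of surfacing the policy status described above; it assumes IsPublic is a *bool, as in recent SDK releases:

package s3examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// isBucketPublic reports whether Amazon S3 considers the bucket public.
// IsPublic is assumed to be a *bool here, hence the aws.ToBool dereference.
func isBucketPublic(ctx context.Context, client *s3.Client, bucket string) (bool, error) {
	out, err := client.GetBucketPolicyStatus(ctx, &s3.GetBucketPolicyStatusInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return false, err
	}
	if out.PolicyStatus == nil {
		return false, nil
	}
	return aws.ToBool(out.PolicyStatus.IsPublic), nil
}
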
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go
index 7e44d38eef..3edb1ec2a4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go
@@ -14,21 +14,37 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the replication
-// configuration of a bucket. It can take a while to propagate the put or delete a
-// replication configuration to all Amazon S3 systems. Therefore, a get request
-// soon after put or delete can return a wrong result. For information about
-// replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
-// in the Amazon S3 User Guide. This action requires permissions for the
-// s3:GetReplicationConfiguration action. For more information about permissions,
-// see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
-// . If you include the Filter element in a replication configuration, you must
-// also include the DeleteMarkerReplication and Priority elements. The response
-// also returns those elements. For information about GetBucketReplication errors,
-// see List of replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList)
+// This operation is not supported by directory buckets.
+//
+// Returns the replication configuration of a bucket.
+//
+// It can take a while to propagate a put or delete of a replication
+// configuration to all Amazon S3 systems. Therefore, a get request soon after a
+// put or delete can return a wrong result.
+//
+// For information about replication configuration, see [Replication] in the Amazon S3 User
+// Guide.
+//
+// This action requires permissions for the s3:GetReplicationConfiguration action.
+// For more information about permissions, see [Using Bucket Policies and User Policies].
+//
+// If you include the Filter element in a replication configuration, you must also
+// include the DeleteMarkerReplication and Priority elements. The response also
+// returns those elements.
+//
+// For information about GetBucketReplication errors, see [List of replication-related error codes].
+//
// The following operations are related to GetBucketReplication :
-// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
-// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+//
+// [PutBucketReplication]
+//
+// [DeleteBucketReplication]
+//
+// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
+// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList
+// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html
func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) {
if params == nil {
params = &GetBucketReplicationInput{}
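As a quick illustration of the call documented above, here is a minimal sketch that reads a bucket's replication rules with this SDK's client. It assumes default credentials, the module's public import path (github.com/aws/aws-sdk-go-v2), and a placeholder bucket name; the caller needs s3:GetReplicationConfiguration on the bucket.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx) // default credential/region chain
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        // "amzn-s3-demo-bucket" is a placeholder bucket name.
        out, err := client.GetBucketReplication(ctx, &s3.GetBucketReplicationInput{
            Bucket: aws.String("amzn-s3-demo-bucket"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, rule := range out.ReplicationConfiguration.Rules {
            fmt.Printf("rule %s: status=%s\n", aws.ToString(rule.ID), rule.Status)
        }
    }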
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go
index 16cc528222..ac546ee0b0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go
@@ -14,11 +14,17 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the request
-// payment configuration of a bucket. To use this version of the operation, you
-// must be the bucket owner. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
-// . The following operations are related to GetBucketRequestPayment :
-// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html)
+// This operation is not supported by directory buckets.
+//
+// Returns the request payment configuration of a bucket. To use this version of
+// the operation, you must be the bucket owner. For more information, see [Requester Pays Buckets].
+//
+// The following operations are related to GetBucketRequestPayment :
+//
+// [ListObjects]
+//
+// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) {
if params == nil {
params = &GetBucketRequestPaymentInput{}
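A similarly small sketch for the request-payment call above, again with a placeholder bucket name and default credentials; only the bucket owner can make this request.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        out, err := client.GetBucketRequestPayment(ctx, &s3.GetBucketRequestPaymentInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("payer:", out.Payer) // "BucketOwner" or "Requester"
    }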
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go
index 69a6e49030..9dc666368b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go
@@ -14,17 +14,28 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the tag set
-// associated with the bucket. To use this operation, you must have permission to
-// perform the s3:GetBucketTagging action. By default, the bucket owner has this
-// permission and can grant this permission to others. GetBucketTagging has the
-// following special error:
+// This operation is not supported by directory buckets.
+//
+// Returns the tag set associated with the bucket.
+//
+// To use this operation, you must have permission to perform the
+// s3:GetBucketTagging action. By default, the bucket owner has this permission and
+// can grant this permission to others.
+//
+// GetBucketTagging has the following special error:
+//
// - Error code: NoSuchTagSet
+//
// - Description: There is no tag set associated with the bucket.
//
// The following operations are related to GetBucketTagging :
-// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html)
-// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html)
+//
+// [PutBucketTagging]
+//
+// [DeleteBucketTagging]
+//
+// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html
+// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html
func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) {
if params == nil {
params = &GetBucketTaggingInput{}
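The NoSuchTagSet special error described above can be detected through smithy-go's APIError interface. A hedged sketch, with a placeholder bucket name and default credentials:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/smithy-go"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        out, err := client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        })
        if err != nil {
            // A bucket with no tags surfaces the NoSuchTagSet error code.
            var apiErr smithy.APIError
            if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchTagSet" {
                fmt.Println("bucket has no tag set")
                return
            }
            log.Fatal(err)
        }
        for _, tag := range out.TagSet {
            fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
        }
    }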
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go
index 10540b136a..d4d7fa261a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go
@@ -14,15 +14,27 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the versioning
-// state of a bucket. To retrieve the versioning state of a bucket, you must be the
-// bucket owner. This implementation also returns the MFA Delete status of the
-// versioning state. If the MFA Delete status is enabled , the bucket owner must
-// use an authentication device to change the versioning state of the bucket. The
-// following operations are related to GetBucketVersioning :
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
-// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+// This operation is not supported by directory buckets.
+//
+// Returns the versioning state of a bucket.
+//
+// To retrieve the versioning state of a bucket, you must be the bucket owner.
+//
+// This implementation also returns the MFA Delete status of the versioning state.
+// If the MFA Delete status is enabled , the bucket owner must use an
+// authentication device to change the versioning state of the bucket.
+//
+// The following operations are related to GetBucketVersioning :
+//
+// [GetObject]
+//
+// [PutObject]
+//
+// [DeleteObject]
+//
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) {
if params == nil {
params = &GetBucketVersioningInput{}
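A minimal sketch of reading the versioning and MFA Delete state documented above; the bucket name is a placeholder and the caller is assumed to be the bucket owner.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        out, err := client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        // Status is empty if versioning has never been configured on the bucket.
        fmt.Println("versioning:", out.Status)
        fmt.Println("MFA delete:", out.MFADelete)
    }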
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go
index c87f6ff1a4..0cb80bd61a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go
@@ -14,17 +14,26 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the website
-// configuration for a bucket. To host website on Amazon S3, you can configure a
-// bucket as website by adding a website configuration. For more information about
-// hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
-// . This GET action requires the S3:GetBucketWebsite permission. By default, only
+// This operation is not supported by directory buckets.
+//
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration.
+// For more information about hosting websites, see [Hosting Websites on Amazon S3].
+//
+// This GET action requires the S3:GetBucketWebsite permission. By default, only
// the bucket owner can read the bucket website configuration. However, bucket
// owners can allow other users to read the website configuration by writing a
-// bucket policy granting them the S3:GetBucketWebsite permission. The following
-// operations are related to GetBucketWebsite :
-// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html)
-// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+// bucket policy granting them the S3:GetBucketWebsite permission.
+//
+// The following operations are related to GetBucketWebsite :
+//
+// [DeleteBucketWebsite]
+//
+// [PutBucketWebsite]
+//
+// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html
+// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+// [DeleteBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html
func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) {
if params == nil {
params = &GetBucketWebsiteInput{}
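A short sketch of fetching the website configuration described above, assuming the caller holds the S3:GetBucketWebsite permission; the bucket name is a placeholder.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        out, err := client.GetBucketWebsite(ctx, &s3.GetBucketWebsiteInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        if out.IndexDocument != nil {
            fmt.Println("index document:", aws.ToString(out.IndexDocument.Suffix))
        }
        if out.ErrorDocument != nil {
            fmt.Println("error document:", aws.ToString(out.ErrorDocument.Key))
        }
    }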
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go
index a64f5964e1..4ca0b5b46f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go
@@ -16,100 +16,141 @@ import (
"time"
)
-// Retrieves an object from Amazon S3. In the GetObject request, specify the full
-// key name for the object. General purpose buckets - Both the virtual-hosted-style
-// requests and the path-style requests are supported. For a virtual hosted-style
-// request example, if you have the object photos/2006/February/sample.jpg ,
-// specify the object key name as /photos/2006/February/sample.jpg . For a
-// path-style request example, if you have the object
-// photos/2006/February/sample.jpg in the bucket named examplebucket , specify the
-// object key name as /examplebucket/photos/2006/February/sample.jpg . For more
-// information about request types, see HTTP Host Header Bucket Specification (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket)
-// in the Amazon S3 User Guide. Directory buckets - Only virtual-hosted-style
-// requests are supported. For a virtual hosted-style request example, if you have
-// the object photos/2006/February/sample.jpg in the bucket named
+// Retrieves an object from Amazon S3.
+//
+// In the GetObject request, specify the full key name for the object.
+//
+// General purpose buckets - Both the virtual-hosted-style requests and the
+// path-style requests are supported. For a virtual hosted-style request example,
+// if you have the object photos/2006/February/sample.jpg , specify the object key
+// name as /photos/2006/February/sample.jpg . For a path-style request example, if
+// you have the object photos/2006/February/sample.jpg in the bucket named
+// examplebucket , specify the object key name as
+// /examplebucket/photos/2006/February/sample.jpg . For more information about
+// request types, see [HTTP Host Header Bucket Specification]in the Amazon S3 User Guide.
+//
+// Directory buckets - Only virtual-hosted-style requests are supported. For a
+// virtual hosted-style request example, if you have the object
+// photos/2006/February/sample.jpg in the bucket named
// examplebucket--use1-az5--x-s3 , specify the object key name as
// /photos/2006/February/sample.jpg . Also, when you make requests to this API
// operation, your requests are sent to the Zonal endpoint. These endpoints support
// virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions
+// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User
+// Guide.
+//
+// Permissions
// - General purpose bucket permissions - You must have the required permissions
// in a policy. To use GetObject , you must have the READ access to the object
// (or version). If you grant READ access to the anonymous user, the GetObject
// operation returns the object without using an authorization header. For more
-// information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// in the Amazon S3 User Guide. If you include a versionId in your request
-// header, you must have the s3:GetObjectVersion permission to access a specific
-// version of an object. The s3:GetObject permission is not required in this
-// scenario. If you request the current version of an object without a specific
-// versionId in the request header, only the s3:GetObject permission is required.
-// The s3:GetObjectVersion permission is not required in this scenario. If the
-// object that you request doesn’t exist, the error that Amazon S3 returns depends
-// on whether you also have the s3:ListBucket permission.
-// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
-// HTTP status code 404 Not Found error.
-// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
-// status code 403 Access Denied error.
-// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
-// s3express:CreateSession permission to the directory bucket in a bucket policy
-// or an IAM identity-based policy. Then, you make the CreateSession API call on
-// the bucket to obtain a session token. With the session token in your request
-// header, you can make API requests to this operation. After the session token
-// expires, you make another CreateSession API call to generate a new session
-// token for use. Amazon Web Services CLI or SDKs create session and refresh the
-// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// information, see [Specifying permissions in a policy]in the Amazon S3 User Guide.
+//
+// If you include a versionId in your request header, you must have the
+// s3:GetObjectVersion permission to access a specific version of an object. The
+// s3:GetObject permission is not required in this scenario.
+//
+// If you request the current version of an object without a specific versionId in
+// the request header, only the s3:GetObject permission is required. The
+// s3:GetObjectVersion permission is not required in this scenario.
+//
+// If the object that you request doesn’t exist, the error that Amazon S3 returns
+// depends on whether you also have the s3:ListBucket permission.
+//
+// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
+// HTTP status code 404 Not Found error.
+//
+// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
+// status code 403 Access Denied error.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. Amazon Web Services CLI or SDKs create session and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession].
//
// Storage classes If the object you are retrieving is stored in the S3 Glacier
// Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the
// S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep
// Archive Access tier, before you can retrieve the object you must first restore a
-// copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
-// . Otherwise, this operation returns an InvalidObjectState error. For
-// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
-// in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the
-// S3 Express One Zone storage class is supported to store newly created objects.
-// Unsupported storage class values won't write a destination object and will
-// respond with the HTTP status code 400 Bad Request . Encryption Encryption
-// request headers, like x-amz-server-side-encryption , should not be sent for the
-// GetObject requests, if your object uses server-side encryption with Amazon S3
-// managed encryption keys (SSE-S3), server-side encryption with Key Management
-// Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon
-// Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject
-// requests for the object that uses these types of keys, you’ll get an HTTP 400
-// Bad Request error. Overriding response header values through the request There
-// are times when you want to override certain response header values of a
-// GetObject response. For example, you might override the Content-Disposition
-// response header value through your GetObject request. You can override values
-// for a set of response headers. These modified response header values are
-// included only in a successful response, that is, when the HTTP status code 200
-// OK is returned. The headers you can override using the following query
-// parameters in the request are a subset of the headers that Amazon S3 accepts
-// when you create an object. The response headers that you can override for the
-// GetObject response are Cache-Control , Content-Disposition , Content-Encoding ,
-// Content-Language , Content-Type , and Expires . To override values for a set of
-// response headers in the GetObject response, you can use the following query
-// parameters in the request.
+// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For
+// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, only the S3 Express One Zone storage
+// class is supported to store newly created objects. Unsupported storage class
+// values won't write a destination object and will respond with the HTTP status
+// code 400 Bad Request .
+//
+// Encryption
+//
+// Encryption request headers, like x-amz-server-side-encryption ,
+// should not be sent for the GetObject requests, if your object uses server-side
+// encryption with Amazon S3 managed encryption keys (SSE-S3), server-side
+// encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer
+// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you
+// include the header in your GetObject requests for the object that uses these
+// types of keys, you’ll get an HTTP 400 Bad Request error.
+//
+// Overriding response header values through the request
+//
+// There are times when you want to override certain response header values of a
+// GetObject response. For example, you might override the Content-Disposition
+// response header value through your GetObject request.
+//
+// You can override values for a set of response headers. These modified response
+// header values are included only in a successful response, that is, when the HTTP
+// status code 200 OK is returned. The headers you can override using the
+// following query parameters in the request are a subset of the headers that
+// Amazon S3 accepts when you create an object.
+//
+// The response headers that you can override for the GetObject response are
+// Cache-Control , Content-Disposition , Content-Encoding , Content-Language ,
+// Content-Type , and Expires .
+//
+// To override values for a set of response headers in the GetObject response, you
+// can use the following query parameters in the request.
+//
// - response-cache-control
+//
// - response-content-disposition
+//
// - response-content-encoding
+//
// - response-content-language
+//
// - response-content-type
+//
// - response-expires
//
// When you use these parameters, you must sign the request by using either an
// Authorization header or a presigned URL. These parameters cannot be used with an
-// unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The
-// HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com .
+// unsigned (anonymous) request.
+//
+// HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
// The following operations are related to GetObject :
-// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
-// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// [ListBuckets]
+//
+// [GetObjectAcl]
+//
+// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html
+// [HTTP Host Header Bucket Specification]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket
+// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html
+// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+//
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) {
if params == nil {
params = &GetObjectInput{}
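To make the behavior documented above concrete, here is a minimal GetObject sketch that also exercises the response-header overrides (ResponseContentType, ResponseCacheControl). Bucket and key are placeholders; default credentials and a general purpose bucket are assumed.

    package main

    import (
        "context"
        "fmt"
        "io"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        // The Response* fields map to the response-content-type and
        // response-cache-control query parameters; they only take effect on a
        // signed request that returns 200 OK.
        out, err := client.GetObject(ctx, &s3.GetObjectInput{
            Bucket:               aws.String("amzn-s3-demo-bucket"),
            Key:                  aws.String("photos/2006/February/sample.jpg"),
            ResponseContentType:  aws.String("application/octet-stream"),
            ResponseCacheControl: aws.String("no-cache"),
        })
        if err != nil {
            log.Fatal(err)
        }
        defer out.Body.Close()

        // Stream the body; replace io.Discard with a file or buffer as needed.
        n, err := io.Copy(io.Discard, out.Body)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("read %d bytes, content type %s\n", n, aws.ToString(out.ContentType))
    }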
@@ -127,35 +168,44 @@ func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns .
type GetObjectInput struct {
- // The bucket name containing the object. Directory buckets - When you use this
- // operation with a directory bucket, you must use virtual-hosted-style requests in
- // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style
- // requests are not supported. Directory bucket names must be unique in the chosen
- // Availability Zone. Bucket names must follow the format
- // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Object Lambda access points - When you use this
- // action with an Object Lambda access point, you must direct requests to the
- // Object Lambda access point hostname. The Object Lambda access point hostname
- // takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
+ // The bucket name containing the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
+ // supported. Directory bucket names must be unique in the chosen Availability
+ // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
+ // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
+ // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Object Lambda access points - When you use this action with an Object Lambda
+ // access point, you must direct requests to the Object Lambda access point
+ // hostname. The Object Lambda access point hostname takes the form
+ // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
+ //
// Access points and Object Lambda access points are not supported by directory
- // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts,
- // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
- // hostname takes the form
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -174,37 +224,55 @@ type GetObjectInput struct {
ExpectedBucketOwner *string
// Return the object only if its entity tag (ETag) is the same as the one
- // specified in this header; otherwise, return a 412 Precondition Failed error. If
- // both of the If-Match and If-Unmodified-Since headers are present in the request
- // as follows: If-Match condition evaluates to true , and; If-Unmodified-Since
- // condition evaluates to false ; then, S3 returns 200 OK and the data requested.
- // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232)
- // .
+ // specified in this header; otherwise, return a 412 Precondition Failed error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows: If-Match condition evaluates to true , and;
+ // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and
+ // the data requested.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
IfMatch *string
// Return the object only if it has been modified since the specified time;
- // otherwise, return a 304 Not Modified error. If both of the If-None-Match and
- // If-Modified-Since headers are present in the request as follows: If-None-Match
- // condition evaluates to false , and; If-Modified-Since condition evaluates to
- // true ; then, S3 returns 304 Not Modified status code. For more information
- // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) .
+ // otherwise, return a 304 Not Modified error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in the
+ // request as follows: If-None-Match condition evaluates to false , and;
+ // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not
+ // Modified status code.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
IfModifiedSince *time.Time
// Return the object only if its entity tag (ETag) is different from the one
- // specified in this header; otherwise, return a 304 Not Modified error. If both
- // of the If-None-Match and If-Modified-Since headers are present in the request
- // as follows: If-None-Match condition evaluates to false , and; If-Modified-Since
- // condition evaluates to true ; then, S3 returns 304 Not Modified HTTP status
- // code. For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232)
- // .
+ // specified in this header; otherwise, return a 304 Not Modified error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in the
+ // request as follows: If-None-Match condition evaluates to false , and;
+ // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not
+ // Modified HTTP status code.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
IfNoneMatch *string
// Return the object only if it has not been modified since the specified time;
- // otherwise, return a 412 Precondition Failed error. If both of the If-Match and
- // If-Unmodified-Since headers are present in the request as follows: If-Match
- // condition evaluates to true , and; If-Unmodified-Since condition evaluates to
- // false ; then, S3 returns 200 OK and the data requested. For more information
- // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) .
+ // otherwise, return a 412 Precondition Failed error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows: If-Match condition evaluates to true , and;
+ // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and
+ // the data requested.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
IfUnmodifiedSince *time.Time
// Part number of the object being read. This is a positive integer between 1 and
@@ -213,18 +281,23 @@ type GetObjectInput struct {
PartNumber *int32
// Downloads the specified byte range of an object. For more information about the
- // HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range (https://www.rfc-editor.org/rfc/rfc9110.html#name-range)
- // . Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+ // HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range].
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+ //
+ // [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range
Range *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// Sets the Cache-Control header of the response.
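The If-* fields documented above turn GetObject into a conditional request. One way to detect the resulting 304 Not Modified, which the SDK surfaces as an operation error, is to unwrap the transport response error and inspect the HTTP status code; the following is a sketch of that approach, with placeholder bucket, key, and ETag values.

    package main

    import (
        "context"
        "errors"
        "fmt"
        "log"
        "net/http"
        "time"

        "github.com/aws/aws-sdk-go-v2/aws"
        awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        // If the stored ETag still matches IfNoneMatch, S3 answers 304 and no
        // body is returned.
        out, err := client.GetObject(ctx, &s3.GetObjectInput{
            Bucket:          aws.String("amzn-s3-demo-bucket"),
            Key:             aws.String("photos/2006/February/sample.jpg"),
            IfNoneMatch:     aws.String(`"9b2cf535f27731c974343645a3985328"`),
            IfModifiedSince: aws.Time(time.Now().Add(-24 * time.Hour)),
        })
        if err != nil {
            var respErr *awshttp.ResponseError
            if errors.As(err, &respErr) && respErr.HTTPStatusCode() == http.StatusNotModified {
                fmt.Println("cached copy is still current (304 Not Modified)")
                return
            }
            log.Fatal(err)
        }
        defer out.Body.Close()
        fmt.Println("object changed, new ETag:", aws.ToString(out.ETag))
    }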
@@ -245,66 +318,90 @@ type GetObjectInput struct {
// Sets the Expires header of the response.
ResponseExpires *time.Time
- // Specifies the algorithm to use when decrypting the object (for example, AES256
- // ). If you encrypt an object by using server-side encryption with
- // customer-provided encryption keys (SSE-C) when you store the object in Amazon
- // S3, then when you GET the object, you must use the following headers:
+ // Specifies the algorithm to use when decrypting the object (for example, AES256 ).
+ //
+ // If you encrypt an object by using server-side encryption with customer-provided
+ // encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+ // GET the object, you must use the following headers:
+ //
// - x-amz-server-side-encryption-customer-algorithm
+ //
// - x-amz-server-side-encryption-customer-key
+ //
// - x-amz-server-side-encryption-customer-key-MD5
- // For more information about SSE-C, see Server-Side Encryption (Using
- // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ //
+ // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key that you originally provided for
// Amazon S3 to encrypt the data before storing it. This value is used to decrypt
// the object when recovering it and must match the one used when storing the data.
// The key must be appropriate for use with the algorithm specified in the
- // x-amz-server-side-encryption-customer-algorithm header. If you encrypt an object
- // by using server-side encryption with customer-provided encryption keys (SSE-C)
- // when you store the object in Amazon S3, then when you GET the object, you must
- // use the following headers:
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // If you encrypt an object by using server-side encryption with customer-provided
+ // encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+ // GET the object, you must use the following headers:
+ //
// - x-amz-server-side-encryption-customer-algorithm
+ //
// - x-amz-server-side-encryption-customer-key
+ //
// - x-amz-server-side-encryption-customer-key-MD5
- // For more information about SSE-C, see Server-Side Encryption (Using
- // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ //
+ // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the customer-provided encryption key
// according to RFC 1321. Amazon S3 uses this header for a message integrity check
- // to ensure that the encryption key was transmitted without error. If you encrypt
- // an object by using server-side encryption with customer-provided encryption keys
- // (SSE-C) when you store the object in Amazon S3, then when you GET the object,
- // you must use the following headers:
+ // to ensure that the encryption key was transmitted without error.
+ //
+ // If you encrypt an object by using server-side encryption with customer-provided
+ // encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+ // GET the object, you must use the following headers:
+ //
// - x-amz-server-side-encryption-customer-algorithm
+ //
// - x-amz-server-side-encryption-customer-key
+ //
// - x-amz-server-side-encryption-customer-key-MD5
- // For more information about SSE-C, see Server-Side Encryption (Using
- // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ //
+ // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerKeyMD5 *string
- // Version ID used to reference a specific version of the object. By default, the
- // GetObject operation returns the current version of an object. To return a
- // different version, use the versionId subresource.
+ // Version ID used to reference a specific version of the object.
+ //
+ // By default, the GetObject operation returns the current version of an object.
+ // To return a different version, use the versionId subresource.
+ //
// - If you include a versionId in your request header, you must have the
// s3:GetObjectVersion permission to access a specific version of an object. The
// s3:GetObject permission is not required in this scenario.
+ //
// - If you request the current version of an object without a specific versionId
// in the request header, only the s3:GetObject permission is required. The
// s3:GetObjectVersion permission is not required in this scenario.
+ //
// - Directory buckets - S3 Versioning isn't enabled and supported for directory
// buckets. For this API operation, only the null value of the version ID is
// supported by directory buckets. You can only specify null to the versionId
// query parameter in the request.
- // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html)
- // .
+ //
+ // For more information about versioning, see [PutBucketVersioning].
+ //
+ // [PutBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html
VersionId *string
noSmithyDocumentSerde
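A small sketch combining the VersionId and Range fields described above to read the first kilobyte of a specific object version; bucket, key, and version ID are placeholders, and the s3:GetObjectVersion permission is assumed.

    package main

    import (
        "context"
        "fmt"
        "io"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        out, err := client.GetObject(ctx, &s3.GetObjectInput{
            Bucket:    aws.String("amzn-s3-demo-bucket"),
            Key:       aws.String("reports/2024/summary.csv"),
            VersionId: aws.String("3HL4kqtJvjVBH40Nrjfkd"), // placeholder version ID
            Range:     aws.String("bytes=0-1023"),          // first KiB only
        })
        if err != nil {
            log.Fatal(err)
        }
        defer out.Body.Close()

        head, err := io.ReadAll(out.Body)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("got %d bytes of version %s\n", len(head), aws.ToString(out.VersionId))
    }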
@@ -325,35 +422,40 @@ type GetObjectOutput struct {
Body io.ReadCloser
// Indicates whether the object uses an S3 Bucket Key for server-side encryption
- // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not
- // supported for directory buckets.
+ // with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// Specifies caching behavior along the request/reply chain.
CacheControl *string
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ // Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
- // present if it was uploaded with the object. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ // Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ // Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
- // present if it was uploaded with the object. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ // Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA256 *string
// Specifies presentational information for the object.
@@ -378,9 +480,11 @@ type GetObjectOutput struct {
// Indicates whether the object retrieved was (true) or was not (false) a Delete
// Marker. If false, this response header does not appear in the response.
+ //
// - If the current version of the object is a delete marker, Amazon S3 behaves
// as if the object was deleted and includes x-amz-delete-marker: true in the
// response.
+ //
// - If the specified version in the request is a delete marker, the response
// returns a 405 Method Not Allowed error and the Last-Modified: timestamp
// response header.
@@ -390,20 +494,33 @@ type GetObjectOutput struct {
// specific version of a resource found at a URL.
ETag *string
- // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
- // ), the response includes this header. It includes the expiry-date and rule-id
+ // If the object expiration is configured (see [PutBucketLifecycleConfiguration]),
+ // the response includes this header. It includes the expiry-date and rule-id
// key-value pairs providing object expiration information. The value of the
- // rule-id is URL-encoded. This functionality is not supported for directory
- // buckets.
+ // rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
Expiration *string
// The date and time at which the object is no longer cacheable.
+ //
+ // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using
+ // the ExpiresString field which contains the unparsed value from the service
+ // response.
Expires *time.Time
- // Date and time when the object was last modified. General purpose buckets - When
- // you specify a versionId of the object in your request, if the specified version
- // in the request is a delete marker, the response returns a 405 Method Not Allowed
- // error and the Last-Modified: timestamp response header.
+ // The unparsed value of the Expires field from the service response. Prefer use
+ // of this value over the normal Expires response field where possible.
+ ExpiresString *string
+
+ // Date and time when the object was last modified.
+ //
+ // General purpose buckets - When you specify a versionId of the object in your
+ // request, if the specified version in the request is a delete marker, the
+ // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp
+ // response header.
LastModified *time.Time
// A map of metadata to store with the object in S3.
@@ -415,20 +532,25 @@ type GetObjectOutput struct {
// are prefixed with x-amz-meta- . This can happen if you create metadata using an
// API like SOAP that supports more flexible metadata than the REST API. For
// example, using SOAP, you can create metadata whose values are not legal HTTP
- // headers. This functionality is not supported for directory buckets.
+ // headers.
+ //
+ // This functionality is not supported for directory buckets.
MissingMeta *int32
// Indicates whether this object has an active legal hold. This field is only
- // returned if you have permission to view an object's legal hold status. This
- // functionality is not supported for directory buckets.
+ // returned if you have permission to view an object's legal hold status.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
- // The Object Lock mode that's currently in place for this object. This
- // functionality is not supported for directory buckets.
+ // The Object Lock mode that's currently in place for this object.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode types.ObjectLockMode
- // The date and time when this object's Object Lock will expire. This
- // functionality is not supported for directory buckets.
+ // The date and time when this object's Object Lock will expire.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time
// The count of parts this object has. This value is only returned if you specify
@@ -436,63 +558,78 @@ type GetObjectOutput struct {
PartsCount *int32
// Amazon S3 can return this if your request involves a bucket that is either a
- // source or destination in a replication rule. This functionality is not supported
- // for directory buckets.
+ // source or destination in a replication rule.
+ //
+ // This functionality is not supported for directory buckets.
ReplicationStatus types.ReplicationStatus
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Provides information about object restoration action and expiration time of the
- // restored object copy. This functionality is not supported for directory buckets.
- // Only the S3 Express One Zone storage class is supported by directory buckets to
- // store objects.
+ // restored object copy.
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
Restore *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to confirm the encryption
- // algorithm that's used. This functionality is not supported for directory
- // buckets.
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide the round-trip
- // message integrity verification of the customer-provided encryption key. This
- // functionality is not supported for directory buckets.
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, indicates the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. This functionality
- // is not supported for directory buckets.
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when you store this object in Amazon
- // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only
- // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is
- // supported.
+ // S3 (for example, AES256 , aws:kms , aws:kms:dsse ).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// Provides storage class information of the object. Amazon S3 returns this header
- // for all objects except for S3 Standard storage class objects. Directory buckets
- // - Only the S3 Express One Zone storage class is supported by directory buckets
- // to store objects.
+ // for all objects except for S3 Standard storage class objects.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
StorageClass types.StorageClass
// The number of tags, if any, on the object, when you have the relevant
- // permission to read object tags. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
- // to retrieve the tag set associated with an object. This functionality is not
- // supported for directory buckets.
+ // permission to read object tags.
+ //
+ // You can use [GetObjectTagging] to retrieve the tag set associated with an object.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
TagCount *int32
- // Version ID of the object. This functionality is not supported for directory
- // buckets.
+ // Version ID of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
// If the bucket is configured as a website, redirects requests for this object to
// another object in the same bucket or to an external URL. Amazon S3 stores the
- // value of this header in the object metadata. This functionality is not supported
- // for directory buckets.
+ // value of this header in the object metadata.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string
// Metadata pertaining to the operation's result.
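On the output side, a sketch of reading a few of the response fields above, including the newly added ExpiresString (preferred over the deprecated, parsed Expires field); bucket and key are placeholders.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        out, err := client.GetObject(ctx, &s3.GetObjectInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholders
            Key:    aws.String("photos/2006/February/sample.jpg"),
        })
        if err != nil {
            log.Fatal(err)
        }
        defer out.Body.Close()

        fmt.Println("etag:         ", aws.ToString(out.ETag))
        fmt.Println("last modified:", aws.ToTime(out.LastModified))
        fmt.Println("storage class:", out.StorageClass)
        // Prefer the unparsed header value over the deprecated Expires field.
        fmt.Println("expires:      ", aws.ToString(out.ExpiresString))
    }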
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
index fc903cb390..9ee25c4e69 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
@@ -13,24 +13,39 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the access
-// control list (ACL) of an object. To use this operation, you must have
-// s3:GetObjectAcl permissions or READ_ACP access to the object. For more
-// information, see Mapping of ACL permissions and access policy permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping)
-// in the Amazon S3 User Guide This functionality is not supported for Amazon S3 on
-// Outposts. By default, GET returns ACL information about the current version of
-// an object. To return ACL information about a different version, use the
-// versionId subresource. If your bucket uses the bucket owner enforced setting for
-// S3 Object Ownership, requests to read ACLs are still supported and return the
+// This operation is not supported by directory buckets.
+//
+// Returns the access control list (ACL) of an object. To use this operation, you
+// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For
+// more information, see [Mapping of ACL permissions and access policy permissions] in the Amazon S3 User Guide.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// By default, GET returns ACL information about the current version of an object.
+// To return ACL information about a different version, use the versionId
+// subresource.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// requests to read ACLs are still supported and return the
// bucket-owner-full-control ACL with the owner being the account that created the
-// bucket. For more information, see Controlling object ownership and disabling
-// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide. The following operations are related to
-// GetObjectAcl :
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
-// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide.
+//
+// The following operations are related to GetObjectAcl :
+//
+// [GetObject]
+//
+// [GetObjectAttributes]
+//
+// [DeleteObject]
+//
+// [PutObject]
+//
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [Mapping of ACL permissions and access policy permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) {
if params == nil {
params = &GetObjectAclInput{}
@@ -49,6 +64,7 @@ func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, op
type GetObjectAclInput struct {
// The bucket name that contains the object for which to get the ACL information.
+ //
// Access points - When you use this action with an access point, you must provide
// the alias of the access point in place of the bucket name or specify the access
// point ARN. When using the access point ARN, you must direct requests to the
@@ -56,8 +72,9 @@ type GetObjectAclInput struct {
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -76,14 +93,17 @@ type GetObjectAclInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Version ID used to reference a specific version of the object. This
- // functionality is not supported for directory buckets.
+ // Version ID used to reference a specific version of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
noSmithyDocumentSerde
@@ -100,11 +120,13 @@ type GetObjectAclOutput struct {
// A list of grants.
Grants []types.Grant
- // Container for the bucket owner's display name and ID.
+ // Container for the bucket owner's display name and ID.
Owner *types.Owner
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
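Usage sketch for the GetObjectAcl documentation reflowed above: a minimal, hypothetical program that prints an object's grants. The bucket and key values are placeholders, and the Key field is assumed from the full GetObjectAclInput type (only Bucket, RequestPayer, and VersionId appear in this hunk).

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Resolve credentials and region from the default chain (env vars, shared config, etc.).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Read the ACL of the current object version; set VersionId to target another version.
	out, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, grant := range out.Grants {
		fmt.Printf("grantee=%v permission=%s\n", grant.Grantee, grant.Permission)
	}
}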
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go
index dd1b9257cd..432b879a0b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go
@@ -16,32 +16,39 @@ import (
// Retrieves all the metadata from an object without returning the object itself.
// This operation is useful if you're interested only in an object's metadata.
+//
// GetObjectAttributes combines the functionality of HeadObject and ListParts . All
// of the data returned with each of those individual calls can be returned with a
-// single call to GetObjectAttributes . Directory buckets - For directory buckets,
-// you must make requests for this API operation to the Zonal endpoint. These
-// endpoints support virtual-hosted-style requests in the format
+// single call to GetObjectAttributes .
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions
+// requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+// Guide.
+//
+// Permissions
+//
// - General purpose bucket permissions - To use GetObjectAttributes , you must
// have READ access to the object. The permissions that you need to use this
// operation with depend on whether the bucket is versioned. If the bucket is
// versioned, you need both the s3:GetObjectVersion and
// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is
// not versioned, you need the s3:GetObject and s3:GetObjectAttributes
-// permissions. For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// in the Amazon S3 User Guide. If the object that you request does not exist, the
-// error Amazon S3 returns depends on whether you also have the s3:ListBucket
-// permission.
+// permissions. For more information, see [Specifying Permissions in a Policy] in the Amazon S3 User Guide. If the
+// object that you request does not exist, the error Amazon S3 returns depends on
+// whether you also have the s3:ListBucket permission.
+//
// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
// HTTP status code 404 Not Found ("no such key") error.
+//
// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP
// status code 403 Forbidden ("access denied") error.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -49,8 +56,7 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession].
//
// Encryption Encryption request headers, like x-amz-server-side-encryption ,
// should not be sent for HEAD requests if your object uses server-side encryption
@@ -61,49 +67,86 @@ import (
// want to specify the encryption method. If you include this header in a GET
// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad
// Request error. It's because the encryption method can't be changed when you
-// retrieve the object. If you encrypt an object by using server-side encryption
-// with customer-provided encryption keys (SSE-C) when you store the object in
-// Amazon S3, then when you retrieve the metadata from the object, you must use the
-// following headers to provide the encryption key for the server to be able to
-// retrieve the object's metadata. The headers are:
+// retrieve the object.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+// retrieve the metadata from the object, you must use the following headers to
+// provide the encryption key for the server to be able to retrieve the object's
+// metadata. The headers are:
+//
// - x-amz-server-side-encryption-customer-algorithm
+//
// - x-amz-server-side-encryption-customer-key
+//
// - x-amz-server-side-encryption-customer-key-MD5
//
-// For more information about SSE-C, see Server-Side Encryption (Using
-// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
-// in the Amazon S3 User Guide. Directory bucket permissions - For directory
-// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (
-// AES256 ) is supported. Versioning Directory buckets - S3 Versioning isn't
-// enabled and supported for directory buckets. For this API operation, only the
-// null value of the version ID is supported by directory buckets. You can only
-// specify null to the versionId query parameter in the request. Conditional
-// request headers Consider the following when using request headers:
+// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+//
+// Directory bucket permissions - For directory buckets, only server-side
+// encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+//
+// Versioning Directory buckets - S3 Versioning isn't enabled and supported for
+// directory buckets. For this API operation, only the null value of the version
+// ID is supported by directory buckets. You can only specify null to the versionId
+// query parameter in the request.
+//
+// Conditional request headers Consider the following when using request headers:
+//
// - If both of the If-Match and If-Unmodified-Since headers are present in the
// request as follows, then Amazon S3 returns the HTTP status code 200 OK and the
// data requested:
+//
// - If-Match condition evaluates to true .
-// - If-Unmodified-Since condition evaluates to false . For more information
-// about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232)
-// .
+//
+// - If-Unmodified-Since condition evaluates to false .
+//
+// For more information about conditional requests, see [RFC 7232].
+//
// - If both of the If-None-Match and If-Modified-Since headers are present in
// the request as follows, then Amazon S3 returns the HTTP status code 304 Not
// Modified :
+//
// - If-None-Match condition evaluates to false .
-// - If-Modified-Since condition evaluates to true . For more information about
-// conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) .
-//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are
-// related to GetObjectAttributes :
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
-// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html)
-// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html)
-// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html)
-// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
-// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html)
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// - If-Modified-Since condition evaluates to true .
+//
+// For more information about conditional requests, see [RFC 7232].
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following actions are related to GetObjectAttributes :
+//
+// [GetObject]
+//
+// [GetObjectAcl]
+//
+// [GetObjectLegalHold]
+//
+// [GetObjectLockConfiguration]
+//
+// [GetObjectRetention]
+//
+// [GetObjectTagging]
+//
+// [HeadObject]
+//
+// [ListParts]
+//
+// [GetObjectLegalHold]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+// [RFC 7232]: https://tools.ietf.org/html/rfc7232
+// [HeadObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html
+// [GetObjectLockConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html
+// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+// [GetObjectRetention]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) {
if params == nil {
params = &GetObjectAttributesInput{}
@@ -121,31 +164,39 @@ func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttri
type GetObjectAttributesInput struct {
- // The name of the bucket that contains the object. Directory buckets - When you
- // use this operation with a directory bucket, you must use virtual-hosted-style
- // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
- // Path-style requests are not supported. Directory bucket names must be unique in
- // the chosen Availability Zone. Bucket names must follow the format
- // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // The name of the bucket that contains the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
+ // supported. Directory bucket names must be unique in the chosen Availability
+ // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
+ // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
+ // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -177,32 +228,38 @@ type GetObjectAttributesInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Specifies the algorithm to use when encrypting the object (for example,
- // AES256). This functionality is not supported for directory buckets.
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
// encrypting data. This value is used to store the object and then it is
// discarded; Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
- // x-amz-server-side-encryption-customer-algorithm header. This functionality is
- // not supported for directory buckets.
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error. This functionality is not
- // supported for directory buckets.
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
- // The version ID used to reference a specific version of the object. S3
- // Versioning isn't enabled and supported for directory buckets. For this API
+ // The version ID used to reference a specific version of the object.
+ //
+ // S3 Versioning isn't enabled and supported for directory buckets. For this API
// operation, only the null value of the version ID is supported by directory
// buckets. You can only specify null to the versionId query parameter in the
// request.
@@ -223,6 +280,7 @@ type GetObjectAttributesOutput struct {
// Specifies whether the object retrieved was ( true ) or was not ( false ) a
// delete marker. If false , this response header does not appear in the response.
+ //
// This functionality is not supported for directory buckets.
DeleteMarker *bool
@@ -240,18 +298,25 @@ type GetObjectAttributesOutput struct {
ObjectSize *int64
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Provides the storage class information of the object. Amazon S3 returns this
- // header for all objects except for S3 Standard storage class objects. For more
- // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
- // . Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // header for all objects except for S3 Standard storage class objects.
+ //
+ // For more information, see [Storage Classes].
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
// directory buckets to store objects.
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
StorageClass types.StorageClass
- // The version ID of the object. This functionality is not supported for directory
- // buckets.
+ // The version ID of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
// Metadata pertaining to the operation's result.
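A hypothetical sketch of the GetObjectAttributes call documented above, reusing a client built as in the earlier GetObjectAcl example. The ObjectAttributes input field and its enum values come from the SDK's types package and are not shown in this hunk, so treat them as assumptions; the extra import is github.com/aws/aws-sdk-go-v2/service/s3/types.

// printObjectAttributes fetches selected attributes in a single round trip
// instead of separate HeadObject and ListParts calls.
func printObjectAttributes(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		ObjectAttributes: []types.ObjectAttributes{ // assumed enum values from the types package
			types.ObjectAttributesEtag,
			types.ObjectAttributesObjectSize,
			types.ObjectAttributesStorageClass,
		},
	})
	if err != nil {
		return err
	}
	// ObjectSize and StorageClass are output fields shown in the hunk above.
	fmt.Printf("size=%d bytes, storage class=%s\n", aws.ToInt64(out.ObjectSize), out.StorageClass)
	return nil
}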
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go
index 548f5e1cc6..76edc7ad29 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go
@@ -13,11 +13,18 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Gets an object's current
-// legal hold status. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
-// . This functionality is not supported for Amazon S3 on Outposts. The following
-// action is related to GetObjectLegalHold :
-// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
+// This operation is not supported by directory buckets.
+//
+// Gets an object's current legal hold status. For more information, see [Locking Objects].
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// The following action is related to GetObjectLegalHold :
+//
+// [GetObjectAttributes]
+//
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) {
if params == nil {
params = &GetObjectLegalHoldInput{}
@@ -36,15 +43,18 @@ func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalH
type GetObjectLegalHoldInput struct {
// The bucket name containing the object whose legal hold status you want to
- // retrieve. Access points - When you use this action with an access point, you
- // must provide the alias of the access point in place of the bucket name or
- // specify the access point ARN. When using the access point ARN, you must direct
- // requests to the access point hostname. The access point hostname takes the form
+ // retrieve.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -63,10 +73,12 @@ type GetObjectLegalHoldInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// The version ID of the object whose legal hold status you want to retrieve.
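A hypothetical sketch of GetObjectLegalHold as documented above (client construction as in the earlier example). The LegalHold output field and its Status value are assumed from the SDK's types package, since the output type is not part of this hunk.

// printLegalHold reports the legal-hold status of one object (or object version).
func printLegalHold(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		// VersionId: aws.String("..."),      // optional; omit to use the current version
	})
	if err != nil {
		return err
	}
	if out.LegalHold != nil { // assumed output field
		fmt.Println("legal hold status:", out.LegalHold.Status)
	}
	return nil
}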
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go
index e8e2fbd9f3..e170ecad1d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go
@@ -13,12 +13,18 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Gets the Object Lock
-// configuration for a bucket. The rule specified in the Object Lock configuration
-// will be applied by default to every new object placed in the specified bucket.
-// For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
-// . The following action is related to GetObjectLockConfiguration :
-// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
+// This operation is not supported by directory buckets.
+//
+// Gets the Object Lock configuration for a bucket. The rule specified in the
+// Object Lock configuration will be applied by default to every new object placed
+// in the specified bucket. For more information, see [Locking Objects].
+//
+// The following action is related to GetObjectLockConfiguration :
+//
+// [GetObjectAttributes]
+//
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) {
if params == nil {
params = &GetObjectLockConfigurationInput{}
@@ -36,16 +42,18 @@ func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObje
type GetObjectLockConfigurationInput struct {
- // The bucket whose Object Lock configuration you want to retrieve. Access points
- // - When you use this action with an access point, you must provide the alias of
- // the access point in place of the bucket name or specify the access point ARN.
- // When using the access point ARN, you must direct requests to the access point
- // hostname. The access point hostname takes the form
+ // The bucket whose Object Lock configuration you want to retrieve.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
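A hypothetical sketch of GetObjectLockConfiguration as documented above; only Bucket is required per this hunk, while the ObjectLockConfiguration output field and its members are assumed from the SDK's types package.

// printObjectLockConfig prints the bucket-level Object Lock configuration, i.e. the
// default rule applied to new objects placed in the bucket.
func printObjectLockConfig(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		return err
	}
	if cfg := out.ObjectLockConfiguration; cfg != nil { // assumed output field
		fmt.Println("object lock enabled:", cfg.ObjectLockEnabled)
		fmt.Printf("default rule: %+v\n", cfg.Rule)
	}
	return nil
}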
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go
index b4daabf16f..93b8115c2c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go
@@ -13,11 +13,18 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Retrieves an object's
-// retention settings. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
-// . This functionality is not supported for Amazon S3 on Outposts. The following
-// action is related to GetObjectRetention :
-// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
+// This operation is not supported by directory buckets.
+//
+// Retrieves an object's retention settings. For more information, see [Locking Objects].
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// The following action is related to GetObjectRetention :
+//
+// [GetObjectAttributes]
+//
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) {
if params == nil {
params = &GetObjectRetentionInput{}
@@ -36,15 +43,18 @@ func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetent
type GetObjectRetentionInput struct {
// The bucket name containing the object whose retention settings you want to
- // retrieve. Access points - When you use this action with an access point, you
- // must provide the alias of the access point in place of the bucket name or
- // specify the access point ARN. When using the access point ARN, you must direct
- // requests to the access point hostname. The access point hostname takes the form
+ // retrieve.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -63,10 +73,12 @@ type GetObjectRetentionInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// The version ID for the object whose retention settings you want to retrieve.
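A hypothetical sketch of GetObjectRetention as documented above; the Retention output field (mode and retain-until date) is assumed from the SDK's types package.

// printRetention prints an object's retention mode and retain-until date, if any.
func printRetention(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		return err
	}
	if r := out.Retention; r != nil { // assumed output field
		fmt.Printf("mode=%s retain until=%s\n", r.Mode, aws.ToTime(r.RetainUntilDate))
	}
	return nil
}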
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go
index dc15914a0e..4aa175010b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go
@@ -13,20 +13,35 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns the tag-set of an
-// object. You send the GET request against the tagging subresource associated with
-// the object. To use this operation, you must have permission to perform the
+// This operation is not supported by directory buckets.
+//
+// Returns the tag-set of an object. You send the GET request against the tagging
+// subresource associated with the object.
+//
+// To use this operation, you must have permission to perform the
// s3:GetObjectTagging action. By default, the GET action returns information about
// current version of an object. For a versioned bucket, you can have multiple
// versions of an object in your bucket. To retrieve tags of any other version, use
// the versionId query parameter. You also need permission for the
-// s3:GetObjectVersionTagging action. By default, the bucket owner has this
-// permission and can grant this permission to others. For information about the
-// Amazon S3 object tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html)
-// . The following actions are related to GetObjectTagging :
-// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
-// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
-// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+// s3:GetObjectVersionTagging action.
+//
+// By default, the bucket owner has this permission and can grant this permission
+// to others.
+//
+// For information about the Amazon S3 object tagging feature, see [Object Tagging].
+//
+// The following actions are related to GetObjectTagging :
+//
+// [DeleteObjectTagging]
+//
+// [GetObjectAttributes]
+//
+// [PutObjectTagging]
+//
+// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
+// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html
func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) {
if params == nil {
params = &GetObjectTaggingInput{}
@@ -45,6 +60,7 @@ func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingI
type GetObjectTaggingInput struct {
// The bucket name containing the object for which to get the tagging information.
+ //
// Access points - When you use this action with an access point, you must provide
// the alias of the access point in place of the bucket name or specify the access
// point ARN. When using the access point ARN, you must direct requests to the
@@ -52,15 +68,18 @@ type GetObjectTaggingInput struct {
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with
- // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
- // The S3 on Outposts hostname takes the form
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -79,10 +98,12 @@ type GetObjectTaggingInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// The versionId of the object for which to get the tagging information.
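A hypothetical sketch of GetObjectTagging as documented above; the TagSet output field is assumed from the SDK's types package, and the bucket and key are placeholders.

// printObjectTags lists the tag-set of the current version of an object.
func printObjectTags(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		// VersionId: aws.String("..."),      // other versions need s3:GetObjectVersionTagging
	})
	if err != nil {
		return err
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
	return nil
}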
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go
index 9fc83178e6..04b1dad8d8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go
@@ -14,14 +14,24 @@ import (
"io"
)
-// This operation is not supported by directory buckets. Returns torrent files
-// from a bucket. BitTorrent can save you bandwidth when you're distributing large
-// files. You can get torrent only for objects that are less than 5 GB in size, and
-// that are not encrypted using server-side encryption with a customer-provided
-// encryption key. To use GET, you must have READ access to the object. This
-// functionality is not supported for Amazon S3 on Outposts. The following action
-// is related to GetObjectTorrent :
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+// This operation is not supported by directory buckets.
+//
+// Returns torrent files from a bucket. BitTorrent can save you bandwidth when
+// you're distributing large files.
+//
+// You can get torrent only for objects that are less than 5 GB in size, and that
+// are not encrypted using server-side encryption with a customer-provided
+// encryption key.
+//
+// To use GET, you must have READ access to the object.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// The following action is related to GetObjectTorrent :
+//
+// [GetObject]
+//
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) {
if params == nil {
params = &GetObjectTorrentInput{}
@@ -58,10 +68,12 @@ type GetObjectTorrentInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
noSmithyDocumentSerde
@@ -78,7 +90,9 @@ type GetObjectTorrentOutput struct {
Body io.ReadCloser
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
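A hypothetical sketch of GetObjectTorrent as documented above; the Body output field (an io.ReadCloser) is shown in this hunk, the output file name is a placeholder, and the standard-library io and os imports are assumed.

// saveTorrent writes the .torrent file for an unencrypted object (< 5 GB) to disk.
func saveTorrent(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectTorrent(ctx, &s3.GetObjectTorrentInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()

	f, err := os.Create("example-key.torrent") // placeholder path
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, out.Body)
	return err
}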
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go
index 3689a4e163..e09ce2792c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go
@@ -14,22 +14,38 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Retrieves the
-// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation,
-// you must have the s3:GetBucketPublicAccessBlock permission. For more
-// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or
-// an object, it checks the PublicAccessBlock configuration for both the bucket
-// (or the bucket that contains the object) and the bucket owner's account. If the
+// This operation is not supported by directory buckets.
+//
+// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use
+// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For
+// more information about Amazon S3 permissions, see [Specifying Permissions in a Policy].
+//
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an
+// object, it checks the PublicAccessBlock configuration for both the bucket (or
+// the bucket that contains the object) and the bucket owner's account. If the
// PublicAccessBlock settings are different between the bucket and the account,
// Amazon S3 uses the most restrictive combination of the bucket-level and
-// account-level settings. For more information about when Amazon S3 considers a
-// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
-// . The following operations are related to GetPublicAccessBlock :
-// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
-// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html)
-// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
-// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+// account-level settings.
+//
+// For more information about when Amazon S3 considers a bucket or an object
+// public, see [The Meaning of "Public"].
+//
+// The following operations are related to GetPublicAccessBlock :
+//
+// [Using Amazon S3 Block Public Access]
+//
+// [PutPublicAccessBlock]
+//
+// [GetPublicAccessBlock]
+//
+// [DeletePublicAccessBlock]
+//
+// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html
+// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) {
if params == nil {
params = &GetPublicAccessBlockInput{}
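A hypothetical sketch of GetPublicAccessBlock as documented above; the PublicAccessBlockConfiguration output field is assumed from the SDK's types package and is printed whole rather than field by field.

// printPublicAccessBlock shows the bucket-level Block Public Access settings.
// Amazon S3 combines these with the account-level settings, using the most
// restrictive of the two, as described in the documentation above.
func printPublicAccessBlock(ctx context.Context, client *s3.Client) error {
	out, err := client.GetPublicAccessBlock(ctx, &s3.GetPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", out.PublicAccessBlockConfiguration) // assumed output field
	return nil
}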
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go
index 5f5958916a..413e4ecace 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go
@@ -19,42 +19,54 @@ import (
// You can use this operation to determine if a bucket exists and if you have
// permission to access it. The action returns a 200 OK if the bucket exists and
-// you have permission to access it. If the bucket does not exist or you do not
-// have permission to access it, the HEAD request returns a generic 400 Bad Request
-// , 403 Forbidden or 404 Not Found code. A message body is not included, so you
-// cannot determine the exception beyond these HTTP response codes. Directory
-// buckets - You must make requests for this API operation to the Zonal endpoint.
-// These endpoints support virtual-hosted-style requests in the format
+// you have permission to access it.
+//
+// If the bucket does not exist or you do not have permission to access it, the
+// HEAD request returns a generic 400 Bad Request , 403 Forbidden or 404 Not Found
+// code. A message body is not included, so you cannot determine the exception
+// beyond these HTTP response codes.
+//
+// Directory buckets - You must make requests for this API operation to the Zonal
+// endpoint. These endpoints support virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests
-// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Authentication and authorization All HeadBucket
-// requests must be authenticated and signed by using IAM credentials (access key
-// ID and secret access key for the IAM identities). All headers with the x-amz-
-// prefix, including x-amz-copy-source , must be signed. For more information, see
-// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
-// . Directory bucket - You must use IAM credentials to authenticate and authorize
+// are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User Guide.
+//
+// Authentication and authorization All HeadBucket requests must be authenticated
+// and signed by using IAM credentials (access key ID and secret access key for the
+// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source
+// , must be signed. For more information, see [REST Authentication].
+//
+// Directory bucket - You must use IAM credentials to authenticate and authorize
// your access to the HeadBucket API operation, instead of using the temporary
-// security credentials through the CreateSession API operation. Amazon Web
-// Services CLI or SDKs handles authentication and authorization on your behalf.
+// security credentials through the CreateSession API operation.
+//
+// Amazon Web Services CLI or SDKs handle authentication and authorization on
+// your behalf.
+//
// Permissions
+//
// - General purpose bucket permissions - To use this operation, you must have
// permissions to perform the s3:ListBucket action. The bucket owner has this
// permission by default and can grant this permission to others. For more
-// information about permissions, see Managing access permissions to your Amazon
-// S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide.
+// information about permissions, see [Managing access permissions to your Amazon S3 resources] in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - You must have the s3express:CreateSession
// permission in the Action element of a policy. By default, the session is in
// the ReadWrite mode. If you want to restrict the access, you can explicitly set
-// the s3express:SessionMode condition key to ReadOnly on the bucket. For more
-// information about example bucket policies, see Example bucket policies for S3
-// Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
-// and Amazon Web Services Identity and Access Management (IAM) identity-based
-// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
-// in the Amazon S3 User Guide.
+// the s3express:SessionMode condition key to ReadOnly on the bucket.
+//
+// For more information about example bucket policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the
+// Amazon S3 User Guide.
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [Managing access permissions to your Amazon S3 resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) {
if params == nil {
params = &HeadBucketInput{}
@@ -72,36 +84,46 @@ func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns
type HeadBucketInput struct {
- // The bucket name. Directory buckets - When you use this operation with a
- // directory bucket, you must use virtual-hosted-style requests in the format
+ // The bucket name.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Object Lambda access points - When you use this API
- // operation with an Object Lambda access point, provide the alias of the Object
- // Lambda access point in place of the bucket name. If the Object Lambda access
- // point alias in a request is not valid, the error code
- // InvalidAccessPointAliasError is returned. For more information about
- // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
- // . Access points and Object Lambda access points are not supported by directory
- // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts,
- // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
- // hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Object Lambda access points - When you use this API operation with an Object
+ // Lambda access point, provide the alias of the Object Lambda access point in
+ // place of the bucket name. If the Object Lambda access point alias in a request
+ // is not valid, the error code InvalidAccessPointAliasError is returned. For more
+ // information about InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
//
// This member is required.
Bucket *string
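
For reference, a minimal sketch (not part of this vendored change) of a HeadBucket call using the Bucket field described above; the client setup, the access point ARN, and the region lookup are illustrative assumptions only.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The Bucket field accepts a bucket name, an access point ARN, or an
	// Object Lambda access point alias; the value below is a placeholder.
	out, err := client.HeadBucket(context.TODO(), &s3.HeadBucketInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
	})
	if err != nil {
		log.Fatalf("HeadBucket failed: %v", err)
	}
	log.Printf("bucket region: %s", aws.ToString(out.BucketRegion))
}
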
@@ -122,21 +144,26 @@ func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) {
type HeadBucketOutput struct {
// Indicates whether the bucket name used in the request is an access point alias.
+ //
// This functionality is not supported for directory buckets.
AccessPointAlias *bool
- // The name of the location where the bucket will be created. For directory
- // buckets, the AZ ID of the Availability Zone where the bucket is created. An
- // example AZ ID value is usw2-az1 . This functionality is only supported by
- // directory buckets.
+ // The name of the location where the bucket will be created.
+ //
+ // For directory buckets, the AZ ID of the Availability Zone where the bucket is
+ // created. An example AZ ID value is usw2-az1 .
+ //
+ // This functionality is only supported by directory buckets.
BucketLocationName *string
- // The type of location where the bucket is created. This functionality is only
- // supported by directory buckets.
+ // The type of location where the bucket is created.
+ //
+ // This functionality is only supported by directory buckets.
BucketLocationType types.LocationType
- // The Region that the bucket is located. This functionality is not supported for
- // directory buckets.
+ // The Region that the bucket is located.
+ //
+ // This functionality is not supported for directory buckets.
BucketRegion *string
// Metadata pertaining to the operation's result.
@@ -285,12 +312,13 @@ type BucketExistsWaiterOptions struct {
// Retryable is function that can be used to override the service defined
// waiter-behavior based on operation output, or returned error. This function is
- // used by the waiter to decide if a state is retryable or a terminal state. By
- // default service-modeled logic will populate this option. This option can thus be
- // used to define a custom waiter state with fall-back to service-modeled waiter
- // state mutators.The function returns an error in case of a failure state. In case
- // of retry state, this function returns a bool value of true and nil error, while
- // in case of success it returns a bool value of false and nil error.
+ // used by the waiter to decide if a state is retryable or a terminal state.
+ //
+ // By default service-modeled logic will populate this option. This option can
+ // thus be used to define a custom waiter state with fall-back to service-modeled
+ // waiter state mutators. The function returns an error in case of a failure state.
+ // In case of retry state, this function returns a bool value of true and nil
+ // error, while in case of success it returns a bool value of false and nil error.
Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error)
}
@@ -450,12 +478,13 @@ type BucketNotExistsWaiterOptions struct {
// Retryable is function that can be used to override the service defined
// waiter-behavior based on operation output, or returned error. This function is
- // used by the waiter to decide if a state is retryable or a terminal state. By
- // default service-modeled logic will populate this option. This option can thus be
- // used to define a custom waiter state with fall-back to service-modeled waiter
- // state mutators.The function returns an error in case of a failure state. In case
- // of retry state, this function returns a bool value of true and nil error, while
- // in case of success it returns a bool value of false and nil error.
+ // used by the waiter to decide if a state is retryable or a terminal state.
+ //
+ // By default service-modeled logic will populate this option. This option can
+ // thus be used to define a custom waiter state with fall-back to service-modeled
+ // waiter state mutators. The function returns an error in case of a failure state.
+ // In case of retry state, this function returns a bool value of true and nil
+ // error, while in case of success it returns a bool value of false and nil error.
Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error)
}
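
For reference, a minimal sketch (not part of this vendored change) of supplying the Retryable override described above when constructing a BucketExistsWaiter; the bucket name and the retry policy are illustrative assumptions.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	waiter := s3.NewBucketExistsWaiter(client, func(o *s3.BucketExistsWaiterOptions) {
		// Custom state decision: any error means "keep retrying" (true, nil),
		// a successful HeadBucket means the terminal success state (false, nil).
		o.Retryable = func(ctx context.Context, in *s3.HeadBucketInput, out *s3.HeadBucketOutput, err error) (bool, error) {
			if err != nil {
				return true, nil
			}
			return false, nil
		}
	})

	// Poll HeadBucket until the bucket exists or two minutes elapse.
	err = waiter.Wait(context.TODO(), &s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"), // placeholder name
	}, 2*time.Minute)
	if err != nil {
		log.Fatalf("bucket never became available: %v", err)
	}
}
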
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go
index 5b7e9b6c35..7152daf07c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go
@@ -19,43 +19,52 @@ import (
// The HEAD operation retrieves metadata from an object without returning the
// object itself. This operation is useful if you're interested only in an object's
-// metadata. A HEAD request has the same options as a GET operation on an object.
-// The response is identical to the GET response except that there is no response
+// metadata.
+//
+// A HEAD request has the same options as a GET operation on an object. The
+// response is identical to the GET response except that there is no response
// body. Because of this, if the HEAD request generates an error, it returns a
// generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405
// Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not
-// possible to retrieve the exact exception of these error codes. Request headers
-// are limited to 8 KB in size. For more information, see Common Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html)
-// . Directory buckets - For directory buckets, you must make requests for this API
+// possible to retrieve the exact exception of these error codes.
+//
+// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers].
+//
+// Directory buckets - For directory buckets, you must make requests for this API
// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions
+// requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+// Guide.
+//
+// Permissions
+//
// - General purpose bucket permissions - To use HEAD , you must have the
// s3:GetObject permission. You need the relevant read object (or version)
-// permission for this operation. For more information, see Actions, resources,
-// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html)
-// in the Amazon S3 User Guide. If the object you request doesn't exist, the error
-// that Amazon S3 returns depends on whether you also have the s3:ListBucket
-// permission.
-// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
-// HTTP status code 404 Not Found error.
-// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
-// status code 403 Forbidden error.
-// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
-// s3express:CreateSession permission to the directory bucket in a bucket policy
-// or an IAM identity-based policy. Then, you make the CreateSession API call on
-// the bucket to obtain a session token. With the session token in your request
-// header, you can make API requests to this operation. After the session token
-// expires, you make another CreateSession API call to generate a new session
-// token for use. Amazon Web Services CLI or SDKs create session and refresh the
-// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3] in the Amazon S3
+// User Guide.
+//
+// If the object you request doesn't exist, the error that Amazon S3 returns
+// depends on whether you also have the s3:ListBucket permission.
+//
+// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
+// HTTP status code 404 Not Found error.
+//
+// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
+// status code 403 Forbidden error.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+//   directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. Amazon Web Services CLI or SDKs create session and refresh the
+// session token automatically to avoid service interruptions when a session
+//   expires. For more information about authorization, see [CreateSession].
//
// Encryption Encryption request headers, like x-amz-server-side-encryption ,
// should not be sent for HEAD requests if your object uses server-side encryption
@@ -66,20 +75,26 @@ import (
// want to specify the encryption method. If you include this header in a HEAD
// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad
// Request error. It's because the encryption method can't be changed when you
-// retrieve the object. If you encrypt an object by using server-side encryption
-// with customer-provided encryption keys (SSE-C) when you store the object in
-// Amazon S3, then when you retrieve the metadata from the object, you must use the
-// following headers to provide the encryption key for the server to be able to
-// retrieve the object's metadata. The headers are:
+// retrieve the object.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+// retrieve the metadata from the object, you must use the following headers to
+// provide the encryption key for the server to be able to retrieve the object's
+// metadata. The headers are:
+//
// - x-amz-server-side-encryption-customer-algorithm
+//
// - x-amz-server-side-encryption-customer-key
+//
// - x-amz-server-side-encryption-customer-key-MD5
//
-// For more information about SSE-C, see Server-Side Encryption (Using
-// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
-// in the Amazon S3 User Guide. Directory bucket permissions - For directory
-// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (
-// AES256 ) is supported. Versioning
+// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+//
+// Directory bucket permissions - For directory buckets, only server-side
+// encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+//
+// Versioning
//
// - If the current version of the object is a delete marker, Amazon S3 behaves
// as if the object was deleted and includes x-amz-delete-marker: true in the
@@ -95,11 +110,23 @@ import (
// supported by directory buckets. You can only specify null to the versionId
// query parameter in the request.
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are
-// related to HeadObject :
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following actions are related to HeadObject :
+//
+// [GetObject]
+//
+// [GetObjectAttributes]
+//
+// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Actions, resources, and condition keys for Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Common Request Headers]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html
+//
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) {
if params == nil {
params = &HeadObjectInput{}
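
For reference, a minimal sketch (not part of this vendored change) of a HeadObject call that supplies the three SSE-C headers discussed in the comment above; it assumes the caller passes the base64-encoded key and key MD5 through these fields unchanged, and the bucket, key, and key bytes are placeholders.

package main

import (
	"context"
	"crypto/md5"
	"encoding/base64"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The same 256-bit key used when the object was stored with SSE-C
	// (placeholder value; in practice this comes from your key store).
	key := []byte("0123456789abcdef0123456789abcdef")
	sum := md5.Sum(key)

	out, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
		Bucket:               aws.String("example-bucket"), // placeholder
		Key:                  aws.String("secret/object"),  // placeholder
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(key)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("content type: %s", aws.ToString(out.ContentType))
}
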
@@ -117,31 +144,39 @@ func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns
type HeadObjectInput struct {
- // The name of the bucket that contains the object. Directory buckets - When you
- // use this operation with a directory bucket, you must use virtual-hosted-style
- // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
- // Path-style requests are not supported. Directory bucket names must be unique in
- // the chosen Availability Zone. Bucket names must follow the format
- // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // The name of the bucket that contains the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
+ // supported. Directory bucket names must be unique in the chosen Availability
+ // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
+ // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
+ // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -151,10 +186,11 @@ type HeadObjectInput struct {
// This member is required.
Key *string
- // To retrieve the checksum, this parameter must be enabled. In addition, if you
- // enable ChecksumMode and the object is encrypted with Amazon Web Services Key
- // Management Service (Amazon Web Services KMS), you must have permission to use
- // the kms:Decrypt action for the request to succeed.
+ // To retrieve the checksum, this parameter must be enabled.
+ //
+ // In addition, if you enable ChecksumMode and the object is encrypted with Amazon
+ // Web Services Key Management Service (Amazon Web Services KMS), you must have
+ // permission to use the kms:Decrypt action for the request to succeed.
ChecksumMode types.ChecksumMode
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -163,40 +199,71 @@ type HeadObjectInput struct {
ExpectedBucketOwner *string
// Return the object only if its entity tag (ETag) is the same as the one
- // specified; otherwise, return a 412 (precondition failed) error. If both of the
- // If-Match and If-Unmodified-Since headers are present in the request as follows:
+ // specified; otherwise, return a 412 (precondition failed) error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows:
+ //
// - If-Match condition evaluates to true , and;
+ //
// - If-Unmodified-Since condition evaluates to false ;
- // Then Amazon S3 returns 200 OK and the data requested. For more information
- // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) .
+ //
+ // Then Amazon S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
IfMatch *string
// Return the object only if it has been modified since the specified time;
- // otherwise, return a 304 (not modified) error. If both of the If-None-Match and
- // If-Modified-Since headers are present in the request as follows:
+ // otherwise, return a 304 (not modified) error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in the
+ // request as follows:
+ //
// - If-None-Match condition evaluates to false , and;
+ //
// - If-Modified-Since condition evaluates to true ;
- // Then Amazon S3 returns the 304 Not Modified response code. For more information
- // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) .
+ //
+ // Then Amazon S3 returns the 304 Not Modified response code.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
IfModifiedSince *time.Time
// Return the object only if its entity tag (ETag) is different from the one
- // specified; otherwise, return a 304 (not modified) error. If both of the
- // If-None-Match and If-Modified-Since headers are present in the request as
- // follows:
+ // specified; otherwise, return a 304 (not modified) error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in the
+ // request as follows:
+ //
// - If-None-Match condition evaluates to false , and;
+ //
// - If-Modified-Since condition evaluates to true ;
- // Then Amazon S3 returns the 304 Not Modified response code. For more information
- // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) .
+ //
+ // Then Amazon S3 returns the 304 Not Modified response code.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
IfNoneMatch *string
// Return the object only if it has not been modified since the specified time;
- // otherwise, return a 412 (precondition failed) error. If both of the If-Match
- // and If-Unmodified-Since headers are present in the request as follows:
+ // otherwise, return a 412 (precondition failed) error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows:
+ //
// - If-Match condition evaluates to true , and;
+ //
// - If-Unmodified-Since condition evaluates to false ;
- // Then Amazon S3 returns 200 OK and the data requested. For more information
- // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) .
+ //
+ // Then Amazon S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
IfUnmodifiedSince *time.Time
// Part number of the object being read. This is a positive integer between 1 and
@@ -214,33 +281,39 @@ type HeadObjectInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Specifies the algorithm to use when encrypting the object (for example,
- // AES256). This functionality is not supported for directory buckets.
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
// encrypting data. This value is used to store the object and then it is
// discarded; Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
- // x-amz-server-side-encryption-customer-algorithm header. This functionality is
- // not supported for directory buckets.
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error. This functionality is not
- // supported for directory buckets.
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
- // Version ID used to reference a specific version of the object. For directory
- // buckets in this API operation, only the null value of the version ID is
- // supported.
+ // Version ID used to reference a specific version of the object.
+ //
+ // For directory buckets in this API operation, only the null value of the version
+ // ID is supported.
VersionId *string
noSmithyDocumentSerde
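
For reference, a minimal sketch (not part of this vendored change) combining the ChecksumMode and conditional-request fields documented above; the bucket, key, and ETag are placeholders, and kms:Decrypt permission is assumed if the object is KMS-encrypted.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
		Bucket:       aws.String("example-bucket"),           // placeholder
		Key:          aws.String("reports/2024/summary.csv"), // placeholder
		ChecksumMode: types.ChecksumModeEnabled,              // ask S3 to return checksum headers
		// Placeholder ETag; S3 answers 304 Not Modified if it still matches.
		IfNoneMatch: aws.String(`"d41d8cd98f00b204e9800998ecf8427e"`),
	})
	if err != nil {
		log.Fatalf("HeadObject failed (may be a 304 Not Modified): %v", err)
	}
	log.Printf("etag=%s sha256=%s", aws.ToString(out.ETag), aws.ToString(out.ChecksumSHA256))
}
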
@@ -257,13 +330,15 @@ type HeadObjectOutput struct {
// Indicates that a range of bytes was specified.
AcceptRanges *string
- // The archive state of the head object. This functionality is not supported for
- // directory buckets.
+ // The archive state of the head object.
+ //
+ // This functionality is not supported for directory buckets.
ArchiveStatus types.ArchiveStatus
// Indicates whether the object uses an S3 Bucket Key for server-side encryption
- // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not
- // supported for directory buckets.
+ // with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// Specifies caching behavior along the request/reply chain.
@@ -274,8 +349,10 @@ type HeadObjectOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
@@ -283,8 +360,10 @@ type HeadObjectOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
@@ -292,8 +371,10 @@ type HeadObjectOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
@@ -301,8 +382,10 @@ type HeadObjectOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA256 *string
// Specifies presentational information for the object.
@@ -323,24 +406,36 @@ type HeadObjectOutput struct {
ContentType *string
// Specifies whether the object retrieved was (true) or was not (false) a Delete
- // Marker. If false, this response header does not appear in the response. This
- // functionality is not supported for directory buckets.
+ // Marker. If false, this response header does not appear in the response.
+ //
+ // This functionality is not supported for directory buckets.
DeleteMarker *bool
// An entity tag (ETag) is an opaque identifier assigned by a web server to a
// specific version of a resource found at a URL.
ETag *string
- // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
- // ), the response includes this header. It includes the expiry-date and rule-id
+ // If the object expiration is configured (see [PutBucketLifecycleConfiguration]),
+ // the response includes this header. It includes the expiry-date and rule-id
// key-value pairs providing object expiration information. The value of the
- // rule-id is URL-encoded. This functionality is not supported for directory
- // buckets.
+ // rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
Expiration *string
// The date and time at which the object is no longer cacheable.
+ //
+ // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using
+ // the ExpiresString field which contains the unparsed value from the service
+ // response.
Expires *time.Time
+ // The unparsed value of the Expires field from the service response. Prefer use
+ // of this value over the normal Expires response field where possible.
+ ExpiresString *string
+
// Date and time when the object was last modified.
LastModified *time.Time
@@ -352,26 +447,34 @@ type HeadObjectOutput struct {
// This is set to the number of metadata entries not returned in x-amz-meta
// headers. This can happen if you create metadata using an API like SOAP that
// supports more flexible metadata than the REST API. For example, using SOAP, you
- // can create metadata whose values are not legal HTTP headers. This functionality
- // is not supported for directory buckets.
+ // can create metadata whose values are not legal HTTP headers.
+ //
+ // This functionality is not supported for directory buckets.
MissingMeta *int32
// Specifies whether a legal hold is in effect for this object. This header is
// only returned if the requester has the s3:GetObjectLegalHold permission. This
// header is not returned if the specified version of this object has never had a
- // legal hold applied. For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
- // . This functionality is not supported for directory buckets.
+ // legal hold applied. For more information about S3 Object Lock, see [Object Lock].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// The Object Lock mode, if any, that's in effect for this object. This header is
// only returned if the requester has the s3:GetObjectRetention permission. For
- // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
- // . This functionality is not supported for directory buckets.
+ // more information about S3 Object Lock, see [Object Lock].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
ObjectLockMode types.ObjectLockMode
// The date and time when the Object Lock retention period expires. This header is
- // only returned if the requester has the s3:GetObjectRetention permission. This
- // functionality is not supported for directory buckets.
+ // only returned if the requester has the s3:GetObjectRetention permission.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time
// The count of parts this object has. This value is only returned if you specify
@@ -379,89 +482,121 @@ type HeadObjectOutput struct {
PartsCount *int32
// Amazon S3 can return this header if your request involves a bucket that is
- // either a source or a destination in a replication rule. In replication, you have
- // a source bucket on which you configure replication and destination bucket or
- // buckets where Amazon S3 stores object replicas. When you request an object (
- // GetObject ) or object metadata ( HeadObject ) from these buckets, Amazon S3 will
- // return the x-amz-replication-status header in the response as follows:
+ // either a source or a destination in a replication rule.
+ //
+ // In replication, you have a source bucket on which you configure replication and
+ // destination bucket or buckets where Amazon S3 stores object replicas. When you
+ // request an object ( GetObject ) or object metadata ( HeadObject ) from these
+ // buckets, Amazon S3 will return the x-amz-replication-status header in the
+ // response as follows:
+ //
// - If requesting an object from the source bucket, Amazon S3 will return the
// x-amz-replication-status header if the object in your request is eligible for
- // replication. For example, suppose that in your replication configuration, you
- // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with
- // key prefix TaxDocs . Any objects you upload with this key name prefix, for
- // example TaxDocs/document1.pdf , are eligible for replication. For any object
- // request with this key name prefix, Amazon S3 will return the
- // x-amz-replication-status header with value PENDING, COMPLETED or FAILED
- // indicating object replication status.
+ // replication.
+ //
+ // For example, suppose that in your replication configuration, you specify object
+ // prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix
+ // TaxDocs . Any objects you upload with this key name prefix, for example
+ // TaxDocs/document1.pdf , are eligible for replication. For any object request
+ // with this key name prefix, Amazon S3 will return the x-amz-replication-status
+ // header with value PENDING, COMPLETED or FAILED indicating object replication
+ // status.
+ //
// - If requesting an object from a destination bucket, Amazon S3 will return
// the x-amz-replication-status header with value REPLICA if the object in your
// request is a replica that Amazon S3 created and there is no replica modification
// replication in progress.
+ //
// - When replicating objects to multiple destination buckets, the
// x-amz-replication-status header acts differently. The header of the source
// object will only return a value of COMPLETED when replication is successful to
// all destinations. The header will remain at value PENDING until replication has
// completed for all destinations. If one or more destinations fails replication
// the header will return FAILED.
- // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
- // . This functionality is not supported for directory buckets.
+ //
+ // For more information, see [Replication].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
ReplicationStatus types.ReplicationStatus
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If the object is an archived object (an object whose storage class is GLACIER),
// the response includes this header if either the archive restoration is in
- // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
- // or an archive copy is already restored. If an archive copy is already restored,
- // the header value indicates when Amazon S3 is scheduled to delete the object
- // copy. For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21
- // Dec 2012 00:00:00 GMT" If the object restoration is in progress, the header
- // returns the value ongoing-request="true" . For more information about archiving
- // objects, see Transitioning Objects: General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations)
- // . This functionality is not supported for directory buckets. Only the S3 Express
+ // progress (see [RestoreObject]) or an archive copy is already restored.
+ //
+ // If an archive copy is already restored, the header value indicates when Amazon
+ // S3 is scheduled to delete the object copy. For example:
+ //
+ // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00
+ // GMT"
+ //
+ // If the object restoration is in progress, the header returns the value
+ // ongoing-request="true" .
+ //
+ // For more information about archiving objects, see [Transitioning Objects: General Considerations].
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
// One Zone storage class is supported by directory buckets to store objects.
+ //
+ // [Transitioning Objects: General Considerations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations
+ // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
Restore *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to confirm the encryption
- // algorithm that's used. This functionality is not supported for directory
- // buckets.
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide the round-trip
- // message integrity verification of the customer-provided encryption key. This
- // functionality is not supported for directory buckets.
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, indicates the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. This functionality
- // is not supported for directory buckets.
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when you store this object in Amazon
- // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only
- // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is
- // supported.
+ // S3 (for example, AES256 , aws:kms , aws:kms:dsse ).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// Provides storage class information of the object. Amazon S3 returns this header
- // for all objects except for S3 Standard storage class objects. For more
- // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
- // . Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // for all objects except for S3 Standard storage class objects.
+ //
+ // For more information, see [Storage Classes].
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
// directory buckets to store objects.
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
StorageClass types.StorageClass
- // Version ID of the object. This functionality is not supported for directory
- // buckets.
+ // Version ID of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
// If the bucket is configured as a website, redirects requests for this object to
// another object in the same bucket or to an external URL. Amazon S3 stores the
- // value of this header in the object metadata. This functionality is not supported
- // for directory buckets.
+ // value of this header in the object metadata.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string
// Metadata pertaining to the operation's result.
@@ -610,12 +745,13 @@ type ObjectExistsWaiterOptions struct {
// Retryable is function that can be used to override the service defined
// waiter-behavior based on operation output, or returned error. This function is
- // used by the waiter to decide if a state is retryable or a terminal state. By
- // default service-modeled logic will populate this option. This option can thus be
- // used to define a custom waiter state with fall-back to service-modeled waiter
- // state mutators.The function returns an error in case of a failure state. In case
- // of retry state, this function returns a bool value of true and nil error, while
- // in case of success it returns a bool value of false and nil error.
+ // used by the waiter to decide if a state is retryable or a terminal state.
+ //
+ // By default service-modeled logic will populate this option. This option can
+ // thus be used to define a custom waiter state with fall-back to service-modeled
+ // waiter state mutators. The function returns an error in case of a failure state.
+ // In case of retry state, this function returns a bool value of true and nil
+ // error, while in case of success it returns a bool value of false and nil error.
Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error)
}
@@ -775,12 +911,13 @@ type ObjectNotExistsWaiterOptions struct {
// Retryable is function that can be used to override the service defined
// waiter-behavior based on operation output, or returned error. This function is
- // used by the waiter to decide if a state is retryable or a terminal state. By
- // default service-modeled logic will populate this option. This option can thus be
- // used to define a custom waiter state with fall-back to service-modeled waiter
- // state mutators.The function returns an error in case of a failure state. In case
- // of retry state, this function returns a bool value of true and nil error, while
- // in case of success it returns a bool value of false and nil error.
+ // used by the waiter to decide if a state is retryable or a terminal state.
+ //
+ // By default service-modeled logic will populate this option. This option can
+ // thus be used to define a custom waiter state with fall-back to service-modeled
+ // waiter state mutators. The function returns an error in case of a failure state.
+ // In case of retry state, this function returns a bool value of true and nil
+ // error, while in case of success it returns a bool value of false and nil error.
Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error)
}
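
For reference, a minimal sketch (not part of this vendored change) of the ObjectExistsWaiter built on HeadObject, relying on the service-modeled retryable logic described above; the delays, bucket, and key are illustrative assumptions.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Wait up to five minutes for an object written by another process to appear,
	// polling HeadObject with the default service-modeled state logic.
	waiter := s3.NewObjectExistsWaiter(client, func(o *s3.ObjectExistsWaiterOptions) {
		o.MinDelay = 5 * time.Second
		o.MaxDelay = 30 * time.Second
	})
	err = waiter.Wait(context.TODO(), &s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),      // placeholder
		Key:    aws.String("exports/latest.json"), // placeholder
	}, 5*time.Minute)
	if err != nil {
		log.Fatalf("object did not appear in time: %v", err)
	}
}
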
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go
index 67b7571c11..bc31e5695a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go
@@ -14,27 +14,40 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Lists the analytics
-// configurations for the bucket. You can have up to 1,000 analytics configurations
-// per bucket. This action supports list pagination and does not return more than
-// 100 configurations at a time. You should always check the IsTruncated element
-// in the response. If there are no more configurations to list, IsTruncated is
-// set to false. If there are more configurations to list, IsTruncated is set to
-// true, and there will be a value in NextContinuationToken . You use the
+// This operation is not supported by directory buckets.
+//
+// Lists the analytics configurations for the bucket. You can have up to 1,000
+// analytics configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100
+// configurations at a time. You should always check the IsTruncated element in
+// the response. If there are no more configurations to list, IsTruncated is set
+// to false. If there are more configurations to list, IsTruncated is set to true,
+// and there will be a value in NextContinuationToken . You use the
// NextContinuationToken value to continue the pagination of the list by passing
-// the value in continuation-token in the request to GET the next page. To use
-// this operation, you must have permissions to perform the
+// the value in continuation-token in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the
// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
-// information about permissions, see Permissions Related to Bucket Subresource
-// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . For information about Amazon S3 analytics feature, see Amazon S3 Analytics –
-// Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
-// . The following operations are related to ListBucketAnalyticsConfigurations :
-// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
-// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
-// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html)
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis].
+//
+// The following operations are related to ListBucketAnalyticsConfigurations :
+//
+// [GetBucketAnalyticsConfiguration]
+//
+// [DeleteBucketAnalyticsConfiguration]
+//
+// [PutBucketAnalyticsConfiguration]
+//
+// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html
+// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) {
if params == nil {
params = &ListBucketAnalyticsConfigurationsInput{}
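
For reference, a minimal sketch (not part of this vendored change) of the continuation-token pagination loop described in the comment above; the bucket name is a placeholder.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	var token *string
	for {
		out, err := client.ListBucketAnalyticsConfigurations(context.TODO(), &s3.ListBucketAnalyticsConfigurationsInput{
			Bucket:            aws.String("example-bucket"), // placeholder
			ContinuationToken: token,                        // nil on the first page
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, ac := range out.AnalyticsConfigurationList {
			log.Printf("analytics configuration: %s", aws.ToString(ac.Id))
		}
		// At most 100 configurations come back per page; follow
		// NextContinuationToken until IsTruncated is false.
		if out.IsTruncated == nil || !*out.IsTruncated {
			break
		}
		token = out.NextContinuationToken
	}
}
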
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go
index 729f878567..0a6cb145fb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go
@@ -14,25 +14,38 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Lists the S3
-// Intelligent-Tiering configuration from the specified bucket. The S3
-// Intelligent-Tiering storage class is designed to optimize storage costs by
-// automatically moving data to the most cost-effective storage access tier,
+// This operation is not supported by directory buckets.
+//
+// Lists the S3 Intelligent-Tiering configuration from the specified bucket.
+//
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
// without performance impact or operational overhead. S3 Intelligent-Tiering
// delivers automatic cost savings in three low latency and high throughput access
// tiers. To get the lowest storage cost on data that can be accessed in minutes to
-// hours, you can choose to activate additional archiving capabilities. The S3
-// Intelligent-Tiering storage class is the ideal storage class for data with
-// unknown, changing, or unpredictable access patterns, independent of object size
-// or retention period. If the size of an object is less than 128 KB, it is not
-// monitored and not eligible for auto-tiering. Smaller objects can be stored, but
-// they are always charged at the Frequent Access tier rates in the S3
-// Intelligent-Tiering storage class. For more information, see Storage class for
-// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)
-// . Operations related to ListBucketIntelligentTieringConfigurations include:
-// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
-// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html)
-// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
+// hours, you can choose to activate additional archiving capabilities.
+//
+// The S3 Intelligent-Tiering storage class is the ideal storage class for data
+// with unknown, changing, or unpredictable access patterns, independent of object
+// size or retention period. If the size of an object is less than 128 KB, it is
+// not monitored and not eligible for auto-tiering. Smaller objects can be stored,
+// but they are always charged at the Frequent Access tier rates in the S3
+// Intelligent-Tiering storage class.
+//
+// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// Operations related to ListBucketIntelligentTieringConfigurations include:
+//
+// [DeleteBucketIntelligentTieringConfiguration]
+//
+// [PutBucketIntelligentTieringConfiguration]
+//
+// [GetBucketIntelligentTieringConfiguration]
+//
+// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html
+// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html
func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) {
if params == nil {
params = &ListBucketIntelligentTieringConfigurationsInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
index 6c879048ca..fa760e6c57 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
@@ -14,26 +14,40 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns a list of
-// inventory configurations for the bucket. You can have up to 1,000 analytics
-// configurations per bucket. This action supports list pagination and does not
-// return more than 100 configurations at a time. Always check the IsTruncated
-// element in the response. If there are no more configurations to list,
-// IsTruncated is set to false. If there are more configurations to list,
-// IsTruncated is set to true, and there is a value in NextContinuationToken . You
-// use the NextContinuationToken value to continue the pagination of the list by
-// passing the value in continuation-token in the request to GET the next page. To
-// use this operation, you must have permissions to perform the
+// This operation is not supported by directory buckets.
+//
+// Returns a list of inventory configurations for the bucket. You can have up to
+// 1,000 inventory configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false. If
+// there are more configurations to list, IsTruncated is set to true, and there is
+// a value in NextContinuationToken . You use the NextContinuationToken value to
+// continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the
// s3:GetInventoryConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
-// information about permissions, see Permissions Related to Bucket Subresource
-// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory].
+//
// The following operations are related to ListBucketInventoryConfigurations :
-// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
-// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
-// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
+//
+// [GetBucketInventoryConfiguration]
+//
+// [DeleteBucketInventoryConfiguration]
+//
+// [PutBucketInventoryConfiguration]
+//
+// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html
+// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html
func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) {
if params == nil {
params = &ListBucketInventoryConfigurationsInput{}
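
The IsTruncated / NextContinuationToken pagination described in the comment above can be driven with a small loop; the same pattern applies to the metrics-configuration listing further below. The helper name and bucket are placeholders, and only the configuration IDs are printed:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listAllInventoryConfigs keeps calling the operation, feeding the previous
// NextContinuationToken back in as ContinuationToken while IsTruncated is true.
func listAllInventoryConfigs(ctx context.Context, client *s3.Client, bucket string) error {
	var token *string
	for {
		out, err := client.ListBucketInventoryConfigurations(ctx,
			&s3.ListBucketInventoryConfigurationsInput{
				Bucket:            aws.String(bucket),
				ContinuationToken: token,
			})
		if err != nil {
			return err
		}
		for _, c := range out.InventoryConfigurationList {
			fmt.Println(aws.ToString(c.Id))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		token = out.NextContinuationToken
	}
}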
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go
index 0b6ca94733..efae4c5cd3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go
@@ -13,28 +13,42 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Lists the metrics
-// configurations for the bucket. The metrics configurations are only for the
-// request metrics of the bucket and do not provide information on daily storage
-// metrics. You can have up to 1,000 configurations per bucket. This action
-// supports list pagination and does not return more than 100 configurations at a
-// time. Always check the IsTruncated element in the response. If there are no
-// more configurations to list, IsTruncated is set to false. If there are more
-// configurations to list, IsTruncated is set to true, and there is a value in
-// NextContinuationToken . You use the NextContinuationToken value to continue the
-// pagination of the list by passing the value in continuation-token in the
-// request to GET the next page. To use this operation, you must have permissions
-// to perform the s3:GetMetricsConfiguration action. The bucket owner has this
-// permission by default. The bucket owner can grant this permission to others. For
-// more information about permissions, see Permissions Related to Bucket
-// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . For more information about metrics configurations and CloudWatch request
-// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
-// . The following operations are related to ListBucketMetricsConfigurations :
-// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
-// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
-// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+// This operation is not supported by directory buckets.
+//
+// Lists the metrics configurations for the bucket. The metrics configurations are
+// only for the request metrics of the bucket and do not provide information on
+// daily storage metrics. You can have up to 1,000 configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false. If
+// there are more configurations to list, IsTruncated is set to true, and there is
+// a value in NextContinuationToken . You use the NextContinuationToken value to
+// continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the
+// s3:GetMetricsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For more information about metrics configurations and CloudWatch request
+// metrics, see [Monitoring Metrics with Amazon CloudWatch].
+//
+// The following operations are related to ListBucketMetricsConfigurations :
+//
+// [PutBucketMetricsConfiguration]
+//
+// [GetBucketMetricsConfiguration]
+//
+// [DeleteBucketMetricsConfiguration]
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html
+// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) {
if params == nil {
params = &ListBucketMetricsConfigurationsInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go
index 086d9d2900..dd5396f13c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go
@@ -13,11 +13,14 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns a list of all
-// buckets owned by the authenticated sender of the request. To use this operation,
-// you must have the s3:ListAllMyBuckets permission. For information about Amazon
-// S3 buckets, see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html)
-// .
+// This operation is not supported by directory buckets.
+//
+// Returns a list of all buckets owned by the authenticated sender of the request.
+// To use this operation, you must have the s3:ListAllMyBuckets permission.
+//
+// For information about Amazon S3 buckets, see [Creating, configuring, and working with Amazon S3 buckets].
+//
+// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html
func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) {
if params == nil {
params = &ListBucketsInput{}
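
A minimal sketch of the ListBuckets call described above, which needs only the s3:ListAllMyBuckets permission; the helper name is hypothetical:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBuckets lists every bucket owned by the authenticated caller and prints
// its name and creation time.
func printBuckets(ctx context.Context, client *s3.Client) error {
	out, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
	if err != nil {
		return err
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.ToString(b.Name), aws.ToTime(b.CreationDate))
	}
	return nil
}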
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go
index 3ebf78af19..16cefddd92 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go
@@ -15,23 +15,27 @@ import (
)
// Returns a list of all Amazon S3 directory buckets owned by the authenticated
-// sender of the request. For more information about directory buckets, see
-// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
-// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must
-// make requests for this API operation to the Regional endpoint. These endpoints
-// support path-style requests in the format
-// https://s3express-control.region_code.amazonaws.com/bucket-name .
-// Virtual-hosted-style requests aren't supported. For more information, see
-// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions You must have the
-// s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy
-// instead of a bucket policy. Cross-account access to this API operation isn't
-// supported. This operation can only be performed by the Amazon Web Services
-// account that owns the resource. For more information about directory bucket
-// policies and permissions, see Amazon Web Services Identity and Access
-// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
-// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The
-// HTTP Host header syntax is s3express-control.region.amazonaws.com .
+// sender of the request. For more information about directory buckets, see [Directory buckets] in the
+// Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region_code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints] in
+// the Amazon S3 User Guide.
+//
+// Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in
+// an IAM identity-based policy instead of a bucket policy. Cross-account access to
+// this API operation isn't supported. This operation can only be performed by the
+// Amazon Web Services account that owns the resource. For more information about
+// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
+//
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) {
if params == nil {
params = &ListDirectoryBucketsInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go
index 183773651a..a6c450b33c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go
@@ -16,38 +16,45 @@ import (
// This operation lists in-progress multipart uploads in a bucket. An in-progress
// multipart upload is a multipart upload that has been initiated by the
// CreateMultipartUpload request, but has not yet been completed or aborted.
+//
// Directory buckets - If multipart uploads in a directory bucket are in progress,
// you can't delete the bucket until all the in-progress multipart uploads are
-// aborted or completed. The ListMultipartUploads operation returns a maximum of
-// 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is
-// also the default value. You can further limit the number of uploads in a
-// response by specifying the max-uploads request parameter. If there are more
-// than 1,000 multipart uploads that satisfy your ListMultipartUploads request,
-// the response returns an IsTruncated element with the value of true , a
-// NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining
-// multipart uploads, you need to make subsequent ListMultipartUploads requests.
-// In these requests, include two query parameters: key-marker and upload-id-marker
-// . Set the value of key-marker to the NextKeyMarker value from the previous
-// response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker
-// value from the previous response. Directory buckets - The upload-id-marker
-// element and the NextUploadIdMarker element aren't supported by directory
-// buckets. To list the additional multipart uploads, you only need to set the
-// value of key-marker to the NextKeyMarker value from the previous response. For
-// more information about multipart uploads, see Uploading Objects Using Multipart
-// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
-// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must
-// make requests for this API operation to the Zonal endpoint. These endpoints
-// support virtual-hosted-style requests in the format
+// aborted or completed.
+//
+// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads
+// in the response. The limit of 1,000 multipart uploads is also the default value.
+// You can further limit the number of uploads in a response by specifying the
+// max-uploads request parameter. If there are more than 1,000 multipart uploads
+// that satisfy your ListMultipartUploads request, the response returns an
+// IsTruncated element with the value of true , a NextKeyMarker element, and a
+// NextUploadIdMarker element. To list the remaining multipart uploads, you need to
+// make subsequent ListMultipartUploads requests. In these requests, include two
+// query parameters: key-marker and upload-id-marker . Set the value of key-marker
+// to the NextKeyMarker value from the previous response. Similarly, set the value
+// of upload-id-marker to the NextUploadIdMarker value from the previous response.
+//
+// Directory buckets - The upload-id-marker element and the NextUploadIdMarker
+// element aren't supported by directory buckets. To list the additional multipart
+// uploads, you only need to set the value of key-marker to the NextKeyMarker
+// value from the previous response.
+//
+// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions
+// requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+// Guide.
+//
+// Permissions
+//
// - General purpose bucket permissions - For information about permissions
-// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide.
+// required to use the multipart upload API, see [Multipart Upload and Permissions] in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -55,29 +62,48 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession].
//
// Sorting of multipart uploads in response
+//
// - General purpose bucket - In the ListMultipartUploads response, the multipart
// uploads are sorted based on two criteria:
+//
// - Key-based sorting - Multipart uploads are initially sorted in ascending
// order based on their object keys.
+//
// - Time-based sorting - For uploads that share the same object key, they are
// further sorted in ascending order based on the upload initiation time. Among
// uploads with the same key, the one that was initiated first will appear before
// the ones that were initiated later.
+//
// - Directory bucket - In the ListMultipartUploads response, the multipart
// uploads aren't sorted lexicographically based on the object keys.
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to ListMultipartUploads :
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following operations are related to ListMultipartUploads :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [ListParts]
+//
+// [AbortMultipartUpload]
+//
+// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) {
if params == nil {
params = &ListMultipartUploadsInput{}
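
The key-marker / upload-id-marker pagination that the rewritten comment describes for general purpose buckets looks roughly like the sketch below; for directory buckets only key-marker would be carried forward. Helper and bucket names are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listInProgressUploads walks all in-progress multipart uploads in a general
// purpose bucket, following NextKeyMarker and NextUploadIdMarker while
// IsTruncated is true.
func listInProgressUploads(ctx context.Context, client *s3.Client, bucket string) error {
	var keyMarker, uploadIDMarker *string
	for {
		out, err := client.ListMultipartUploads(ctx, &s3.ListMultipartUploadsInput{
			Bucket:         aws.String(bucket),
			KeyMarker:      keyMarker,
			UploadIdMarker: uploadIDMarker,
		})
		if err != nil {
			return err
		}
		for _, u := range out.Uploads {
			fmt.Println(aws.ToString(u.Key), aws.ToString(u.UploadId))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		keyMarker, uploadIDMarker = out.NextKeyMarker, out.NextUploadIdMarker
	}
}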
@@ -95,42 +121,52 @@ func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipart
type ListMultipartUploadsInput struct {
- // The name of the bucket to which the multipart upload was initiated. Directory
- // buckets - When you use this operation with a directory bucket, you must use
- // virtual-hosted-style requests in the format
+ // The name of the bucket to which the multipart upload was initiated.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+  // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+  // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+  // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
- // Character you use to group keys. All keys that contain the same string between
- // the prefix, if specified, and the first occurrence of the delimiter after the
- // prefix are grouped under a single result element, CommonPrefixes . If you don't
- // specify the prefix parameter, then the substring starts at the beginning of the
- // key. The keys that are grouped under CommonPrefixes result element are not
- // returned elsewhere in the response. Directory buckets - For directory buckets, /
- // is the only supported delimiter.
+ // Character you use to group keys.
+ //
+ // All keys that contain the same string between the prefix, if specified, and the
+ // first occurrence of the delimiter after the prefix are grouped under a single
+ // result element, CommonPrefixes . If you don't specify the prefix parameter, then
+ // the substring starts at the beginning of the key. The keys that are grouped
+ // under CommonPrefixes result element are not returned elsewhere in the response.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
Delimiter *string
// Requests Amazon S3 to encode the object keys in the response and specifies the
@@ -147,20 +183,26 @@ type ListMultipartUploadsInput struct {
ExpectedBucketOwner *string
// Specifies the multipart upload after which listing should begin.
+ //
// - General purpose buckets - For general purpose buckets, key-marker is an
// object key. Together with upload-id-marker , this parameter specifies the
- // multipart upload after which listing should begin. If upload-id-marker is not
- // specified, only the keys lexicographically greater than the specified
- // key-marker will be included in the list. If upload-id-marker is specified, any
- // multipart uploads for a key equal to the key-marker might also be included,
- // provided those multipart uploads have upload IDs lexicographically greater than
- // the specified upload-id-marker .
+ // multipart upload after which listing should begin.
+ //
+ // If upload-id-marker is not specified, only the keys lexicographically greater
+ // than the specified key-marker will be included in the list.
+ //
+ // If upload-id-marker is specified, any multipart uploads for a key equal to the
+ // key-marker might also be included, provided those multipart uploads have
+ // upload IDs lexicographically greater than the specified upload-id-marker .
+ //
// - Directory buckets - For directory buckets, key-marker is obfuscated and
// isn't a real object key. The upload-id-marker parameter isn't supported by
// directory buckets. To list the additional multipart uploads, you only need to
// set the value of key-marker to the NextKeyMarker value from the previous
- // response. In the ListMultipartUploads response, the multipart uploads aren't
- // sorted lexicographically based on the object keys.
+ // response.
+ //
+ // In the ListMultipartUploads response, the multipart uploads aren't sorted
+ // lexicographically based on the object keys.
KeyMarker *string
// Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the
@@ -171,26 +213,31 @@ type ListMultipartUploadsInput struct {
// Lists in-progress uploads only for those keys that begin with the specified
// prefix. You can use prefixes to separate a bucket into different grouping of
// keys. (You can think of using prefix to make groups in the same way that you'd
- // use a folder in a file system.) Directory buckets - For directory buckets, only
- // prefixes that end in a delimiter ( / ) are supported.
+ // use a folder in a file system.)
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
Prefix *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+  // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// Together with key-marker, specifies the multipart upload after which listing
// should begin. If key-marker is not specified, the upload-id-marker parameter is
// ignored. Otherwise, any multipart uploads for a key equal to the key-marker
// might be included in the list only if they have an upload ID lexicographically
- // greater than the specified upload-id-marker . This functionality is not
- // supported for directory buckets.
+ // greater than the specified upload-id-marker .
+ //
+ // This functionality is not supported for directory buckets.
UploadIdMarker *string
noSmithyDocumentSerde
@@ -210,20 +257,25 @@ type ListMultipartUploadsOutput struct {
// If you specify a delimiter in the request, then the result returns each
// distinct key prefix containing the delimiter in a CommonPrefixes element. The
- // distinct key prefixes are returned in the Prefix child element. Directory
- // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are
- // supported.
+ // distinct key prefixes are returned in the Prefix child element.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
CommonPrefixes []types.CommonPrefix
// Contains the delimiter you specified in the request. If you don't specify a
- // delimiter in your request, this element is absent from the response. Directory
- // buckets - For directory buckets, / is the only supported delimiter.
+ // delimiter in your request, this element is absent from the response.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
Delimiter *string
- // Encoding type used by Amazon S3 to encode object keys in the response. If you
- // specify the encoding-type request parameter, Amazon S3 includes this element in
- // the response, and returns encoded key name values in the following response
- // elements: Delimiter , KeyMarker , Prefix , NextKeyMarker , Key .
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ //
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements:
+ //
+ // Delimiter , KeyMarker , Prefix , NextKeyMarker , Key .
EncodingType types.EncodingType
// Indicates whether the returned list of multipart uploads is truncated. A value
@@ -244,26 +296,31 @@ type ListMultipartUploadsOutput struct {
NextKeyMarker *string
// When a list is truncated, this element specifies the value that should be used
- // for the upload-id-marker request parameter in a subsequent request. This
- // functionality is not supported for directory buckets.
+ // for the upload-id-marker request parameter in a subsequent request.
+ //
+ // This functionality is not supported for directory buckets.
NextUploadIdMarker *string
// When a prefix is provided in the request, this field contains the specified
// prefix. The result contains only keys starting with the specified prefix.
- // Directory buckets - For directory buckets, only prefixes that end in a delimiter
- // ( / ) are supported.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
Prefix *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Together with key-marker, specifies the multipart upload after which listing
// should begin. If key-marker is not specified, the upload-id-marker parameter is
// ignored. Otherwise, any multipart uploads for a key equal to the key-marker
// might be included in the list only if they have an upload ID lexicographically
- // greater than the specified upload-id-marker . This functionality is not
- // supported for directory buckets.
+ // greater than the specified upload-id-marker .
+ //
+ // This functionality is not supported for directory buckets.
UploadIdMarker *string
// Container for elements related to a particular multipart upload. A response can
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go
index bcb90eb2d1..f97d156939 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go
@@ -13,19 +13,34 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns metadata about
-// all versions of the objects in a bucket. You can also use request parameters as
-// selection criteria to return metadata about a subset of all the object versions.
+// This operation is not supported by directory buckets.
+//
+// Returns metadata about all versions of the objects in a bucket. You can also
+// use request parameters as selection criteria to return metadata about a subset
+// of all the object versions.
+//
// To use this operation, you must have permission to perform the
-// s3:ListBucketVersions action. Be aware of the name difference. A 200 OK
-// response can contain valid or invalid XML. Make sure to design your application
-// to parse the contents of the response and handle it appropriately. To use this
-// operation, you must have READ access to the bucket. The following operations are
-// related to ListObjectVersions :
-// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
-// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+// s3:ListBucketVersions action. Be aware of the name difference.
+//
+// A 200 OK response can contain valid or invalid XML. Make sure to design your
+// application to parse the contents of the response and handle it appropriately.
+//
+// To use this operation, you must have READ access to the bucket.
+//
+// The following operations are related to ListObjectVersions :
+//
+// [ListObjectsV2]
+//
+// [GetObject]
+//
+// [PutObject]
+//
+// [DeleteObject]
+//
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) {
if params == nil {
params = &ListObjectVersionsInput{}
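
A sketch of paging through object versions with the KeyMarker / VersionIdMarker pair, as the comment above describes; names are placeholders and delete markers are omitted for brevity:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listAllVersions pages through every object version in a versioned bucket,
// following NextKeyMarker and NextVersionIdMarker while IsTruncated is true.
func listAllVersions(ctx context.Context, client *s3.Client, bucket string) error {
	var keyMarker, versionMarker *string
	for {
		out, err := client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{
			Bucket:          aws.String(bucket),
			KeyMarker:       keyMarker,
			VersionIdMarker: versionMarker,
		})
		if err != nil {
			return err
		}
		for _, v := range out.Versions {
			fmt.Println(aws.ToString(v.Key), aws.ToString(v.VersionId))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		keyMarker, versionMarker = out.NextKeyMarker, out.NextVersionIdMarker
	}
}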
@@ -93,10 +108,12 @@ type ListObjectVersionsInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+  // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// Specifies the object version you want to start listing from.
@@ -127,10 +144,13 @@ type ListObjectVersionsOutput struct {
// max-keys limitation. These keys are not returned elsewhere in the response.
Delimiter *string
- // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
// If you specify the encoding-type request parameter, Amazon S3 includes this
// element in the response, and returns encoded key name values in the following
- // response elements: KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter .
+ // response elements:
+ //
+ // KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter .
EncodingType types.EncodingType
// A flag that indicates whether Amazon S3 returned all of the results that
@@ -164,7 +184,9 @@ type ListObjectVersionsOutput struct {
Prefix *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Marks the last version of the key returned in a truncated response.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go
index 9a3bf3e024..ee9fd98de5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go
@@ -13,19 +13,35 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Returns some or all (up
-// to 1,000) of the objects in a bucket. You can use the request parameters as
-// selection criteria to return a subset of the objects in a bucket. A 200 OK
-// response can contain valid or invalid XML. Be sure to design your application to
-// parse the contents of the response and handle it appropriately. This action has
-// been revised. We recommend that you use the newer version, ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
-// , when developing applications. For backward compatibility, Amazon S3 continues
-// to support ListObjects . The following operations are related to ListObjects :
-// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
+// This operation is not supported by directory buckets.
+//
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use the
+// request parameters as selection criteria to return a subset of the objects in a
+// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design
+// your application to parse the contents of the response and handle it
+// appropriately.
+//
+// This action has been revised. We recommend that you use the newer version, [ListObjectsV2],
+// when developing applications. For backward compatibility, Amazon S3 continues to
+// support ListObjects .
+//
+// The following operations are related to ListObjects :
+//
+// [ListObjectsV2]
+//
+// [GetObject]
+//
+// [PutObject]
+//
+// [CreateBucket]
+//
+// [ListBuckets]
+//
+// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) {
if params == nil {
params = &ListObjectsInput{}
@@ -43,31 +59,39 @@ func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optF
type ListObjectsInput struct {
- // The name of the bucket containing the objects. Directory buckets - When you use
- // this operation with a directory bucket, you must use virtual-hosted-style
- // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
- // Path-style requests are not supported. Directory bucket names must be unique in
- // the chosen Availability Zone. Bucket names must follow the format
- // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // The name of the bucket containing the objects.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
+ // supported. Directory bucket names must be unique in the chosen Availability
+ // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
+ // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
+  // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+  // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+  // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -121,14 +145,20 @@ func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) {
type ListObjectsOutput struct {
// All of the keys (up to 1,000) rolled up in a common prefix count as a single
- // return when calculating the number of returns. A response can contain
- // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if
- // there are any) keys between Prefix and the next occurrence of the string
- // specified by the delimiter. CommonPrefixes lists keys that act like
- // subdirectories in the directory specified by Prefix . For example, if the prefix
- // is notes/ and the delimiter is a slash ( / ), as in notes/summer/july , the
- // common prefix is notes/summer/ . All of the keys that roll up into a common
- // prefix count as a single return when calculating the number of returns.
+ // return when calculating the number of returns.
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the next
+ // occurrence of the string specified by the delimiter.
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory
+ // specified by Prefix .
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash ( / ), as in
+ // notes/summer/july , the common prefix is notes/summer/ . All of the keys that
+ // roll up into a common prefix count as a single return when calculating the
+ // number of returns.
CommonPrefixes []types.CommonPrefix
// Metadata about each object returned.
@@ -163,18 +193,21 @@ type ListObjectsOutput struct {
// When the response is truncated (the IsTruncated element value in the response
// is true ), you can use the key name in this field as the marker parameter in
// the subsequent request to get the next set of objects. Amazon S3 lists objects
- // in alphabetical order. This element is returned only if you have the delimiter
- // request parameter specified. If the response does not include the NextMarker
- // element and it is truncated, you can use the value of the last Key element in
- // the response as the marker parameter in the subsequent request to get the next
- // set of object keys.
+ // in alphabetical order.
+ //
+ // This element is returned only if you have the delimiter request parameter
+ // specified. If the response does not include the NextMarker element and it is
+ // truncated, you can use the value of the last Key element in the response as the
+ // marker parameter in the subsequent request to get the next set of object keys.
NextMarker *string
// Keys that begin with the indicated prefix.
Prefix *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go
index ee09d3cbde..50e9f5fbd8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go
@@ -17,26 +17,29 @@ import (
// You can use the request parameters as selection criteria to return a subset of
// the objects in a bucket. A 200 OK response can contain valid or invalid XML.
// Make sure to design your application to parse the contents of the response and
-// handle it appropriately. For more information about listing objects, see
-// Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html)
-// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html)
-// . Directory buckets - For directory buckets, you must make requests for this API
+// handle it appropriately.
+//
+// For more information about listing objects, see [Listing object keys programmatically] in the Amazon S3 User Guide.
+// To get a list of your buckets, see [ListBuckets].
+//
+// Directory buckets - For directory buckets, you must make requests for this API
// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions
+// requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+// Guide.
+//
+// Permissions
+//
// - General purpose bucket permissions - To use this operation, you must have
// READ access to the bucket. You must have permission to perform the
// s3:ListBucket action. The bucket owner has this permission by default and can
-// grant this permission to others. For more information about permissions, see
-// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide.
+// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations]
+// and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -44,24 +47,42 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession].
//
// Sorting order of returned objects
+//
// - General purpose bucket - For general purpose buckets, ListObjectsV2 returns
// objects in lexicographical order based on their key names.
+//
// - Directory bucket - For directory buckets, ListObjectsV2 does not return
// objects in lexicographical order.
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . This section describes the
-// latest revision of this action. We recommend that you use this revised API
-// operation for application development. For backward compatibility, Amazon S3
-// continues to support the prior version of this API operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html)
-// . The following operations are related to ListObjectsV2 :
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// This section describes the latest revision of this action. We recommend that
+// you use this revised API operation for application development. For backward
+// compatibility, Amazon S3 continues to support the prior version of this API
+// operation, [ListObjects].
+//
+// The following operations are related to ListObjectsV2 :
+//
+// [GetObject]
+//
+// [PutObject]
+//
+// [CreateBucket]
+//
+// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Listing object keys programmatically]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) {
if params == nil {
params = &ListObjectsV2Input{}
@@ -79,30 +100,37 @@ func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input,
type ListObjectsV2Input struct {
- // Directory buckets - When you use this operation with a directory bucket, you
+ // Directory buckets - When you use this operation with a directory bucket, you
// must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -113,13 +141,15 @@ type ListObjectsV2Input struct {
ContinuationToken *string
// A delimiter is a character that you use to group keys.
- // - Directory buckets - For directory buckets, / is the only supported
- // delimiter.
+ //
+ // - Directory buckets - For directory buckets, / is the only supported delimiter.
+ //
// - Directory buckets - When you query ListObjectsV2 with a delimiter during
// in-progress multipart uploads, the CommonPrefixes response parameter contains
// the prefixes that are associated with the in-progress multipart uploads. For
- // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html)
- // in the Amazon S3 User Guide.
+ // more information about multipart uploads, see [Multipart Upload Overview] in the Amazon S3 User Guide.
+ //
+ // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
Delimiter *string
// Encoding type used by Amazon S3 to encode object keys in the response. If using
@@ -134,8 +164,10 @@ type ListObjectsV2Input struct {
// The owner field is not present in ListObjectsV2 by default. If you want to
// return the owner field with each key in the result, then set the FetchOwner
- // field to true . Directory buckets - For directory buckets, the bucket owner is
- // returned as the object owner for all objects.
+ // field to true .
+ //
+ // Directory buckets - For directory buckets, the bucket owner is returned as the
+ // object owner for all objects.
FetchOwner *bool
// Sets the maximum number of keys returned in the response. By default, the
@@ -144,23 +176,28 @@ type ListObjectsV2Input struct {
MaxKeys *int32
// Specifies the optional fields that you want returned in the response. Fields
- // that you do not specify are not returned. This functionality is not supported
- // for directory buckets.
+ // that you do not specify are not returned.
+ //
+ // This functionality is not supported for directory buckets.
OptionalObjectAttributes []types.OptionalObjectAttributes
- // Limits the response to keys that begin with the specified prefix. Directory
- // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are
- // supported.
+ // Limits the response to keys that begin with the specified prefix.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
Prefix *string
// Confirms that the requester knows that she or he will be charged for the list
// objects request in V2 style. Bucket owners need not specify this parameter in
- // their requests. This functionality is not supported for directory buckets.
+ // their requests.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer types.RequestPayer
// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
- // listing after this specified key. StartAfter can be any key in the bucket. This
- // functionality is not supported for directory buckets.
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ //
+ // This functionality is not supported for directory buckets.
StartAfter *string
noSmithyDocumentSerde
@@ -176,43 +213,57 @@ type ListObjectsV2Output struct {
// All of the keys (up to 1,000) that share the same prefix are grouped together.
// When counting the total numbers of returns by this API operation, this group of
- // keys is considered as one item. A response can contain CommonPrefixes only if
- // you specify a delimiter. CommonPrefixes contains all (if there are any) keys
- // between Prefix and the next occurrence of the string specified by a delimiter.
+ // keys is considered as one item.
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the next
+ // occurrence of the string specified by a delimiter.
+ //
// CommonPrefixes lists keys that act like subdirectories in the directory
- // specified by Prefix . For example, if the prefix is notes/ and the delimiter is
- // a slash ( / ) as in notes/summer/july , the common prefix is notes/summer/ . All
- // of the keys that roll up into a common prefix count as a single return when
- // calculating the number of returns.
+ // specified by Prefix .
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash ( / ) as in
+ // notes/summer/july , the common prefix is notes/summer/ . All of the keys that
+ // roll up into a common prefix count as a single return when calculating the
+ // number of returns.
+ //
// - Directory buckets - For directory buckets, only prefixes that end in a
// delimiter ( / ) are supported.
+ //
// - Directory buckets - When you query ListObjectsV2 with a delimiter during
// in-progress multipart uploads, the CommonPrefixes response parameter contains
// the prefixes that are associated with the in-progress multipart uploads. For
- // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html)
- // in the Amazon S3 User Guide.
+ // more information about multipart uploads, see [Multipart Upload Overview] in the Amazon S3 User Guide.
+ //
+ // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
CommonPrefixes []types.CommonPrefix
// Metadata about each object returned.
Contents []types.Object
- // If ContinuationToken was sent with the request, it is included in the response.
- // You can use the returned ContinuationToken for pagination of the list response.
- // You can use this ContinuationToken for pagination of the list results.
+ // If ContinuationToken was sent with the request, it is included in the
+ // response. You can use the returned ContinuationToken for pagination of the list
+ // response. You can use this ContinuationToken for pagination of the list
+ // results.
ContinuationToken *string
// Causes keys that contain the same string between the prefix and the first
// occurrence of the delimiter to be rolled up into a single result element in the
// CommonPrefixes collection. These rolled-up keys are not returned elsewhere in
// the response. Each rolled-up result counts as only one return against the
- // MaxKeys value. Directory buckets - For directory buckets, / is the only
- // supported delimiter.
+ // MaxKeys value.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
Delimiter *string
// Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
// If you specify the encoding-type request parameter, Amazon S3 includes this
// element in the response, and returns encoded key name values in the following
- // response elements: Delimiter, Prefix, Key, and StartAfter .
+ // response elements:
+ //
+ // Delimiter, Prefix, Key, and StartAfter .
EncodingType types.EncodingType
// Set to false if all of the results were returned. Set to true if more keys are
@@ -239,16 +290,21 @@ type ListObjectsV2Output struct {
// obfuscated and is not a real key
NextContinuationToken *string
- // Keys that begin with the indicated prefix. Directory buckets - For directory
- // buckets, only prefixes that end in a delimiter ( / ) are supported.
+ // Keys that begin with the indicated prefix.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
Prefix *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
- // If StartAfter was sent with the request, it is included in the response. This
- // functionality is not supported for directory buckets.
+ // If StartAfter was sent with the request, it is included in the response.
+ //
+ // This functionality is not supported for directory buckets.
StartAfter *string
// Metadata pertaining to the operation's result.
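For context on the ListObjectsV2 surface documented above, here is a minimal usage sketch against this SDK (not part of the vendored patch): the bucket name and prefix are placeholders, and it assumes credentials and a Region resolve from the default configuration chain.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	// Credentials and Region come from the default chain (env vars, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The paginator carries NextContinuationToken forward until IsTruncated is false.
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket:    aws.String("example-bucket"), // placeholder
		Prefix:    aws.String("notes/"),         // placeholder
		Delimiter: aws.String("/"),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, cp := range page.CommonPrefixes {
			fmt.Println("common prefix:", aws.ToString(cp.Prefix))
		}
		for _, obj := range page.Contents {
			fmt.Println("key:", aws.ToString(obj.Key))
		}
	}
}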
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go
index 3f3946d935..4e40bb5bf7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go
@@ -14,55 +14,79 @@ import (
"time"
)
-// Lists the parts that have been uploaded for a specific multipart upload. To use
-// this operation, you must provide the upload ID in the request. You obtain this
-// uploadID by sending the initiate multipart upload request through
-// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// . The ListParts request returns a maximum of 1,000 uploaded parts. The limit of
+// Lists the parts that have been uploaded for a specific multipart upload.
+//
+// To use this operation, you must provide the upload ID in the request. You
+// obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload].
+//
+// The ListParts request returns a maximum of 1,000 uploaded parts. The limit of
// 1,000 parts is also the default value. You can restrict the number of parts in a
// response by specifying the max-parts request parameter. If your multipart
// upload consists of more than 1,000 parts, the response returns an IsTruncated
// field with the value of true , and a NextPartNumberMarker element. To list
// remaining uploaded parts, in subsequent ListParts requests, include the
// part-number-marker query string parameter and set its value to the
-// NextPartNumberMarker field value from the previous response. For more
-// information on multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
-// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must
-// make requests for this API operation to the Zonal endpoint. These endpoints
-// support virtual-hosted-style requests in the format
+// NextPartNumberMarker field value from the previous response.
+//
+// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions
+// requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+// Guide.
+//
+// Permissions
// - General purpose bucket permissions - For information about permissions
-// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide. If the upload was created using server-side
-// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer
-// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must
-// have permission to the kms:Decrypt action for the ListParts request to
-// succeed.
-// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
-// s3express:CreateSession permission to the directory bucket in a bucket policy
-// or an IAM identity-based policy. Then, you make the CreateSession API call on
-// the bucket to obtain a session token. With the session token in your request
-// header, you can make API requests to this operation. After the session token
-// expires, you make another CreateSession API call to generate a new session
-// token for use. Amazon Web Services CLI or SDKs create session and refresh the
-// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// required to use the multipart upload API, see [Multipart Upload and Permissions] in the Amazon S3 User Guide.
+//
+// If the upload was created using server-side encryption with Key Management
+// Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon
+// Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt
+// action for the ListParts request to succeed.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. Amazon Web Services CLI or SDKs create session and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession].
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following operations are related to ListParts :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to ListParts :
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
-// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
-// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+// [AbortMultipartUpload]
+//
+// [GetObjectAttributes]
+//
+// [ListMultipartUploads]
+//
+// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+//
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) {
if params == nil {
params = &ListPartsInput{}
@@ -80,31 +104,39 @@ func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns .
type ListPartsInput struct {
- // The name of the bucket to which the parts are being uploaded. Directory buckets
- // - When you use this operation with a directory bucket, you must use
- // virtual-hosted-style requests in the format
+ // The name of the bucket to which the parts are being uploaded.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -135,31 +167,39 @@ type ListPartsInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// The server-side encryption (SSE) algorithm used to encrypt the object. This
// parameter is needed only when the object was created using a checksum algorithm.
- // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // For more information, see [Protecting data using SSE-C keys] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerAlgorithm *string
// The server-side encryption (SSE) customer managed key. This parameter is needed
// only when the object was created using a checksum algorithm. For more
- // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // information, see [Protecting data using SSE-C keys] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerKey *string
// The MD5 server-side encryption (SSE) customer managed key. This parameter is
// needed only when the object was created using a checksum algorithm. For more
- // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // information, see [Protecting data using SSE-C keys] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerKeyMD5 *string
noSmithyDocumentSerde
@@ -177,17 +217,21 @@ type ListPartsOutput struct {
// incomplete multipart uploads and the prefix in the lifecycle rule matches the
// object name in the request, then the response includes this header indicating
// when the initiated multipart upload will become eligible for abort operation.
- // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
- // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
- // . The response will also include the x-amz-abort-rule-id header that will
- // provide the ID of the lifecycle configuration rule that defines this action.
+ // For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration].
+ //
+ // The response will also include the x-amz-abort-rule-id header that will provide
+ // the ID of the lifecycle configuration rule that defines this action.
+ //
// This functionality is not supported for directory buckets.
+ //
+ // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
AbortDate *time.Time
// This header is returned along with the x-amz-abort-date header. It identifies
// applicable lifecycle configuration rule that defines the action to abort
- // incomplete multipart uploads. This functionality is not supported for directory
- // buckets.
+ // incomplete multipart uploads.
+ //
+ // This functionality is not supported for directory buckets.
AbortRuleId *string
// The name of the bucket to which the multipart upload was initiated. Does not
@@ -203,7 +247,7 @@ type ListPartsOutput struct {
// provides the user ARN and display name.
Initiator *types.Initiator
- // Indicates whether the returned list of parts is truncated. A true value
+ // Indicates whether the returned list of parts is truncated. A true value
// indicates that the list was truncated. A list can be truncated if the number of
// parts exceeds the limit returned in the MaxParts element.
IsTruncated *bool
@@ -221,8 +265,10 @@ type ListPartsOutput struct {
// Container element that identifies the object owner, after the object is
// created. If multipart upload is initiated by an IAM user, this element provides
- // the parent account ID and display name. Directory buckets - The bucket owner is
- // returned as the object owner for all the parts.
+ // the parent account ID and display name.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner for all
+ // the parts.
Owner *types.Owner
// Specifies the part after which listing should begin. Only parts with higher
@@ -234,12 +280,15 @@ type ListPartsOutput struct {
Parts []types.Part
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
- // The class of storage used to store the uploaded object. Directory buckets -
- // Only the S3 Express One Zone storage class is supported by directory buckets to
- // store objects.
+ // The class of storage used to store the uploaded object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
StorageClass types.StorageClass
// Upload ID identifying the multipart upload whose parts are being listed.
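To make the part-number-marker flow described above concrete, here is a rough sketch (not part of the vendored change) that pages through ListParts with this client; the package name is hypothetical, and the bucket, key, and upload ID are caller-supplied placeholders. The same loop shape applies to the other marker-paginated S3 list operations in this SDK.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// listAllParts pages through ListParts until the response is no longer
// truncated, carrying NextPartNumberMarker forward as part-number-marker.
func listAllParts(ctx context.Context, client *s3.Client, bucket, key, uploadID string) ([]types.Part, error) {
	var parts []types.Part
	var marker *string
	for {
		out, err := client.ListParts(ctx, &s3.ListPartsInput{
			Bucket:           aws.String(bucket),
			Key:              aws.String(key),
			UploadId:         aws.String(uploadID),
			MaxParts:         aws.Int32(1000), // 1,000 is also the service default
			PartNumberMarker: marker,
		})
		if err != nil {
			return nil, err
		}
		parts = append(parts, out.Parts...)
		if !aws.ToBool(out.IsTruncated) {
			return parts, nil
		}
		marker = out.NextPartNumberMarker
	}
}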
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go
index c15d55f1f9..a968551024 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go
@@ -15,30 +15,45 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets the accelerate
-// configuration of an existing bucket. Amazon S3 Transfer Acceleration is a
-// bucket-level feature that enables you to perform faster data transfers to Amazon
-// S3. To use this operation, you must have permission to perform the
+// This operation is not supported by directory buckets.
+//
+// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
+// Acceleration is a bucket-level feature that enables you to perform faster data
+// transfers to Amazon S3.
+//
+// To use this operation, you must have permission to perform the
// s3:PutAccelerateConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
-// information about permissions, see Permissions Related to Bucket Subresource
-// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . The Transfer Acceleration state of a bucket can be set to one of the following
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// The Transfer Acceleration state of a bucket can be set to one of the following
// two values:
+//
// - Enabled – Enables accelerated data transfers to the bucket.
+//
// - Suspended – Disables accelerated data transfers to the bucket.
//
-// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html)
-// action returns the transfer acceleration state of a bucket. After setting the
-// Transfer Acceleration state of a bucket to Enabled, it might take up to thirty
-// minutes before the data transfer rates to the bucket increase. The name of the
-// bucket used for Transfer Acceleration must be DNS-compliant and must not contain
-// periods ("."). For more information about transfer acceleration, see Transfer
-// Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
-// . The following operations are related to PutBucketAccelerateConfiguration :
-// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html)
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+// The [GetBucketAccelerateConfiguration] action returns the transfer acceleration state of a bucket.
+//
+// After setting the Transfer Acceleration state of a bucket to Enabled, it might
+// take up to thirty minutes before the data transfer rates to the bucket increase.
+//
+// The name of the bucket used for Transfer Acceleration must be DNS-compliant and
+// must not contain periods (".").
+//
+// For more information about transfer acceleration, see [Transfer Acceleration].
+//
+// The following operations are related to PutBucketAccelerateConfiguration :
+//
+// [GetBucketAccelerateConfiguration]
+//
+// [CreateBucket]
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+// [GetBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) {
if params == nil {
params = &PutBucketAccelerateConfigurationInput{}
@@ -70,10 +85,13 @@ type PutBucketAccelerateConfigurationInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The account ID of the expected bucket owner. If the account ID that you provide
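As a quick illustration of the accelerate configuration described above, a hedged sketch (not part of the patch) that flips Transfer Acceleration to Enabled; the package and helper names and the bucket argument are placeholders.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableTransferAcceleration sets the bucket's accelerate configuration to
// Enabled. The bucket name must be DNS-compliant and must not contain periods.
func enableTransferAcceleration(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketAccelerateConfiguration(ctx, &s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
		AccelerateConfiguration: &types.AccelerateConfiguration{
			Status: types.BucketAccelerateStatusEnabled,
		},
	})
	return err
}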
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go
index f88bb4af22..d61376f010 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go
@@ -15,89 +15,159 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets the permissions on
-// an existing bucket using access control lists (ACL). For more information, see
-// Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
-// . To set the ACL of a bucket, you must have the WRITE_ACP permission. You can
-// use one of the following two ways to set a bucket's permissions:
+// This operation is not supported by directory buckets.
+//
+// Sets the permissions on an existing bucket using access control lists (ACL).
+// For more information, see [Using ACLs]. To set the ACL of a bucket, you must have the
+// WRITE_ACP permission.
+//
+// You can use one of the following two ways to set a bucket's permissions:
+//
// - Specify the ACL in the request body
+//
// - Specify permissions using request headers
//
// You cannot specify access permission using both the body and the request
-// headers. Depending on your application needs, you may choose to set the ACL on a
-// bucket using either the request body or the headers. For example, if you have an
+// headers.
+//
+// Depending on your application needs, you may choose to set the ACL on a bucket
+// using either the request body or the headers. For example, if you have an
// existing application that updates a bucket ACL using the request body, then you
-// can continue to use that approach. If your bucket uses the bucket owner enforced
-// setting for S3 Object Ownership, ACLs are disabled and no longer affect
-// permissions. You must use policies to grant access to your bucket and the
-// objects in it. Requests to set ACLs or update ACLs fail and return the
-// AccessControlListNotSupported error code. Requests to read ACLs are still
-// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide. Permissions You can set access permissions by using
-// one of the following methods:
+// can continue to use that approach.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// ACLs are disabled and no longer affect permissions. You must use policies to
+// grant access to your bucket and the objects in it. Requests to set ACLs or
+// update ACLs fail and return the AccessControlListNotSupported error code.
+// Requests to read ACLs are still supported. For more information, see [Controlling object ownership] in the
+// Amazon S3 User Guide.
+//
+// Permissions You can set access permissions by using one of the following
+// methods:
+//
// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a
// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined
// set of grantees and permissions. Specify the canned ACL name as the value of
// x-amz-acl . If you use this header, you cannot use other access
-// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
-// .
+// control-specific headers in your request. For more information, see [Canned ACL].
+//
// - Specify access permissions explicitly with the x-amz-grant-read ,
// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control
// headers. When using these headers, you specify explicit access permissions and
// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
// permission. If you use these ACL-specific headers, you cannot use the
// x-amz-acl header to set a canned ACL. These parameters map to the set of
-// permissions that Amazon S3 supports in an ACL. For more information, see
-// Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
-// . You specify each grantee as a type=value pair, where the type is one of the
-// following:
-// - id – if the value specified is the canonical user ID of an Amazon Web
-// Services account
-// - uri – if you are granting permissions to a predefined group
-// - emailAddress – if the value specified is the email address of an Amazon Web
-// Services account Using email addresses to specify a grantee is only supported in
-// the following Amazon Web Services Regions:
-// - US East (N. Virginia)
-// - US West (N. California)
-// - US West (Oregon)
-// - Asia Pacific (Singapore)
-// - Asia Pacific (Sydney)
-// - Asia Pacific (Tokyo)
-// - Europe (Ireland)
-// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
-// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
-// in the Amazon Web Services General Reference. For example, the following
-// x-amz-grant-write header grants create, overwrite, and delete objects
-// permission to LogDelivery group predefined by Amazon S3 and two Amazon Web
-// Services accounts identified by their email addresses. x-amz-grant-write:
-// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333",
-// id="555566667777"
+// permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview].
+//
+// You specify each grantee as a type=value pair, where the type is one of the
+// following:
+//
+// - id – if the value specified is the canonical user ID of an Amazon Web
+// Services account
+//
+// - uri – if you are granting permissions to a predefined group
+//
+// - emailAddress – if the value specified is the email address of an Amazon Web
+// Services account
+//
+// Using email addresses to specify a grantee is only supported in the following
+// Amazon Web Services Regions:
+//
+// - US East (N. Virginia)
+//
+// - US West (N. California)
+//
+// - US West (Oregon)
+//
+// - Asia Pacific (Singapore)
+//
+// - Asia Pacific (Sydney)
+//
+// - Asia Pacific (Tokyo)
+//
+// - Europe (Ireland)
+//
+// - South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in
+// the Amazon Web Services General Reference.
+//
+// For example, the following x-amz-grant-write header grants create, overwrite,
+// and delete objects permission to LogDelivery group predefined by Amazon S3 and
+// two Amazon Web Services accounts identified by their email addresses.
+//
+// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// id="111122223333", id="555566667777"
//
// You can use either a canned ACL or specify access permissions explicitly. You
-// cannot do both. Grantee Values You can specify the person (grantee) to whom
-// you're assigning access rights (using request elements) in the following ways:
-// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and
-// ignored in the request
-// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
-// - By Email address: <>Grantees@email.com<>& The grantee is resolved to the
-// CanonicalUser and, in a response to a GET Object acl request, appears as the
-// CanonicalUser. Using email addresses to specify a grantee is only supported in
-// the following Amazon Web Services Regions:
-// - US East (N. Virginia)
-// - US West (N. California)
-// - US West (Oregon)
-// - Asia Pacific (Singapore)
-// - Asia Pacific (Sydney)
-// - Asia Pacific (Tokyo)
-// - Europe (Ireland)
-// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
-// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
-// in the Amazon Web Services General Reference.
+// cannot do both.
+//
+// Grantee Values You can specify the person (grantee) to whom you're assigning
+// access rights (using request elements) in the following ways:
+//
+// - By the person's ID:
+//
+// <>ID<><>GranteesEmail<>
+//
+// DisplayName is optional and ignored in the request
+//
+// - By URI:
+//
+// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
+//
+// - By Email address:
+//
+// <>Grantees@email.com<>&
+//
+// The grantee is resolved to the CanonicalUser and, in a response to a GET Object
+// acl request, appears as the CanonicalUser.
+//
+// Using email addresses to specify a grantee is only supported in the following
+// Amazon Web Services Regions:
+//
+// - US East (N. Virginia)
+//
+// - US West (N. California)
+//
+// - US West (Oregon)
+//
+// - Asia Pacific (Singapore)
+//
+// - Asia Pacific (Sydney)
+//
+// - Asia Pacific (Tokyo)
+//
+// - Europe (Ireland)
+//
+// - South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in
+// the Amazon Web Services General Reference.
//
// The following operations are related to PutBucketAcl :
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
-// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// [CreateBucket]
+//
+// [DeleteBucket]
+//
+// [GetObjectAcl]
+//
+// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) {
if params == nil {
params = &PutBucketAclInput{}
@@ -130,17 +200,23 @@ type PutBucketAclInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The base64-encoded 128-bit MD5 digest of the data. This header must be used as
// a message integrity check to verify that the request body was not corrupted in
- // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt)
+ // transit. For more information, go to [RFC 1864.]
+ //
// For requests made using the Amazon Web Services Command Line Interface (CLI) or
// Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -158,9 +234,10 @@ type PutBucketAclInput struct {
// Allows grantee to read the bucket ACL.
GrantReadACP *string
- // Allows grantee to create new objects in the bucket. For the bucket and object
- // owners of existing objects, also allows deletions and overwrites of those
- // objects.
+ // Allows grantee to create new objects in the bucket.
+ //
+ // For the bucket and object owners of existing objects, also allows deletions and
+ // overwrites of those objects.
GrantWrite *string
// Allows grantee to write the ACL for the applicable bucket.
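To ground the canned-ACL versus grant-header discussion above, a small sketch (not part of the vendored change); the package and helper names and the bucket argument are placeholders, and the two approaches must not be combined in a single request.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// setBucketCannedACL applies a predefined (canned) ACL via the x-amz-acl header.
func setBucketCannedACL(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
		Bucket: aws.String(bucket),
		ACL:    types.BucketCannedACLPrivate,
	})
	return err
}

// setBucketGrantWrite grants write access explicitly with the x-amz-grant-write
// header instead of a canned ACL, using the LogDelivery group URI quoted above.
func setBucketGrantWrite(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
		Bucket:     aws.String(bucket),
		GrantWrite: aws.String(`uri="http://acs.amazonaws.com/groups/s3/LogDelivery"`),
	})
	return err
}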
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
index 0604fb930a..292a04b776 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
@@ -14,45 +14,67 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets an analytics
-// configuration for the bucket (specified by the analytics configuration ID). You
-// can have up to 1,000 analytics configurations per bucket. You can choose to have
-// storage class analysis export analysis reports sent to a comma-separated values
-// (CSV) flat file. See the DataExport request element. Reports are updated daily
-// and are based on the object filters that you configure. When selecting data
-// export, you specify a destination bucket and an optional destination prefix
-// where the file is written. You can export the data to a destination bucket in a
-// different account. However, the destination bucket must be in the same Region as
-// the bucket that you are making the PUT analytics configuration to. For more
-// information, see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
-// . You must create a bucket policy on the destination bucket where the exported
+// This operation is not supported by directory buckets.
+//
+// Sets an analytics configuration for the bucket (specified by the analytics
+// configuration ID). You can have up to 1,000 analytics configurations per bucket.
+//
+// You can choose to have storage class analysis export analysis reports sent to a
+// comma-separated values (CSV) flat file. See the DataExport request element.
+// Reports are updated daily and are based on the object filters that you
+// configure. When selecting data export, you specify a destination bucket and an
+// optional destination prefix where the file is written. You can export the data
+// to a destination bucket in a different account. However, the destination bucket
+// must be in the same Region as the bucket that you are making the PUT analytics
+// configuration to. For more information, see [Amazon S3 Analytics – Storage Class Analysis].
+//
+// You must create a bucket policy on the destination bucket where the exported
// file is written to grant permissions to Amazon S3 to write objects to the
-// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory
-// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9)
-// . To use this operation, you must have permissions to perform the
+// bucket. For an example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis].
+//
+// To use this operation, you must have permissions to perform the
// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
-// information about permissions, see Permissions Related to Bucket Subresource
-// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . PutBucketAnalyticsConfiguration has the following special errors:
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// PutBucketAnalyticsConfiguration has the following special errors:
+//
// - HTTP Error: HTTP 400 Bad Request
+//
// - Code: InvalidArgument
+//
// - Cause: Invalid argument.
+//
// - HTTP Error: HTTP 400 Bad Request
+//
// - Code: TooManyConfigurations
+//
// - Cause: You are attempting to create a new configuration but have already
// reached the 1,000-configuration limit.
+//
// - HTTP Error: HTTP 403 Forbidden
+//
// - Code: AccessDenied
+//
// - Cause: You are not the owner of the specified bucket, or you do not have
// the s3:PutAnalyticsConfiguration bucket permission to set the configuration on
// the bucket.
//
// The following operations are related to PutBucketAnalyticsConfiguration :
-// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
-// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
-// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
+//
+// [GetBucketAnalyticsConfiguration]
+//
+// [DeleteBucketAnalyticsConfiguration]
+//
+// [ListBucketAnalyticsConfigurations]
+//
+// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9
+// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html
+// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) {
if params == nil {
params = &PutBucketAnalyticsConfigurationInput{}
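// Illustrative sketch (not part of the vendored patch): one way to call the
// PutBucketAnalyticsConfiguration operation documented above, using the SDK's
// canonical module paths. Bucket names, the configuration ID, and the export
// prefix are hypothetical placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func putAnalyticsConfig(ctx context.Context) error {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}
	client := s3.NewFromConfig(cfg)

	// Export daily storage-class analysis results as CSV to a destination
	// bucket in the same Region, as the doc comment above describes.
	_, err = client.PutBucketAnalyticsConfiguration(ctx, &s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String("example-source-bucket"), // hypothetical
		Id:     aws.String("report-1"),
		AnalyticsConfiguration: &types.AnalyticsConfiguration{
			Id: aws.String("report-1"),
			StorageClassAnalysis: &types.StorageClassAnalysis{
				DataExport: &types.StorageClassAnalysisDataExport{
					OutputSchemaVersion: types.StorageClassAnalysisSchemaVersionV1,
					Destination: &types.AnalyticsExportDestination{
						S3BucketDestination: &types.AnalyticsS3BucketDestination{
							Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
							Format: types.AnalyticsS3ExportFileFormatCsv,
							Prefix: aws.String("analytics/"),
						},
					},
				},
			},
		},
	})
	return err
}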
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
index 3e6604ef6a..e56061b652 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
@@ -15,35 +15,54 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets the cors
-// configuration for your bucket. If the configuration exists, Amazon S3 replaces
-// it. To use this operation, you must be allowed to perform the s3:PutBucketCORS
+// This operation is not supported by directory buckets.
+//
+// Sets the cors configuration for your bucket. If the configuration exists,
+// Amazon S3 replaces it.
+//
+// To use this operation, you must be allowed to perform the s3:PutBucketCORS
// action. By default, the bucket owner has this permission and can grant it to
-// others. You set this configuration on a bucket so that the bucket can service
+// others.
+//
+// You set this configuration on a bucket so that the bucket can service
// cross-origin requests. For example, you might want to enable a request whose
// origin is http://www.example.com to access your Amazon S3 bucket at
-// my.example.bucket.com by using the browser's XMLHttpRequest capability. To
-// enable cross-origin resource sharing (CORS) on a bucket, you add the cors
+// my.example.bucket.com by using the browser's XMLHttpRequest capability.
+//
+// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors
// subresource to the bucket. The cors subresource is an XML document in which you
// configure rules that identify origins and the HTTP methods that can be executed
-// on your bucket. The document is limited to 64 KB in size. When Amazon S3
-// receives a cross-origin request (or a pre-flight OPTIONS request) against a
-// bucket, it evaluates the cors configuration on the bucket and uses the first
-// CORSRule rule that matches the incoming browser request to enable a cross-origin
-// request. For a rule to match, the following conditions must be met:
+// on your bucket. The document is limited to 64 KB in size.
+//
+// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS
+// request) against a bucket, it evaluates the cors configuration on the bucket
+// and uses the first CORSRule rule that matches the incoming browser request to
+// enable a cross-origin request. For a rule to match, the following conditions
+// must be met:
+//
// - The request's Origin header must match AllowedOrigin elements.
+//
// - The request method (for example, GET, PUT, HEAD, and so on) or the
// Access-Control-Request-Method header in case of a pre-flight OPTIONS request
// must be one of the AllowedMethod elements.
+//
// - Every header specified in the Access-Control-Request-Headers request header
// of a pre-flight request must match an AllowedHeader element.
//
-// For more information about CORS, go to Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
-// in the Amazon S3 User Guide. The following operations are related to
-// PutBucketCors :
-// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html)
-// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
-// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
+// For more information about CORS, go to [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide.
+//
+// The following operations are related to PutBucketCors :
+//
+// [GetBucketCors]
+//
+// [DeleteBucketCors]
+//
+// [RESTOPTIONSobject]
+//
+// [GetBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html
+// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html
+// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html
func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) {
if params == nil {
params = &PutBucketCorsInput{}
@@ -67,8 +86,9 @@ type PutBucketCorsInput struct {
Bucket *string
// Describes the cross-origin access configuration for objects in an Amazon S3
- // bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
- // in the Amazon S3 User Guide.
+ // bucket. For more information, see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide.
+ //
+ // [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
//
// This member is required.
CORSConfiguration *types.CORSConfiguration
@@ -77,17 +97,23 @@ type PutBucketCorsInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The base64-encoded 128-bit MD5 digest of the data. This header must be used as
// a message integrity check to verify that the request body was not corrupted in
- // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt)
+ // transit. For more information, go to [RFC 1864.]
+ //
// For requests made using the Amazon Web Services Command Line Interface (CLI) or
// Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
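// Illustrative sketch (not from the vendored patch) of the PutBucketCors call
// documented above, reusing the imports from the earlier analytics sketch. The
// bucket name and allowed origin are hypothetical.
func putExampleCors(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"),
		CORSConfiguration: &types.CORSConfiguration{
			CORSRules: []types.CORSRule{{
				// Allow browser GET and PUT requests from this origin.
				AllowedOrigins: []string{"http://www.example.com"},
				AllowedMethods: []string{"GET", "PUT"},
				AllowedHeaders: []string{"*"},
			}},
		},
	})
	return err
}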
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
index dfc71dc5c6..01e160cdd3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
@@ -15,30 +15,41 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. This action uses the
-// encryption subresource to configure default encryption and Amazon S3 Bucket Keys
-// for an existing bucket. By default, all buckets have a default encryption
-// configuration that uses server-side encryption with Amazon S3 managed keys
-// (SSE-S3). You can optionally configure default encryption for a bucket by using
-// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or
-// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS).
-// If you specify default encryption by using SSE-KMS, you can also configure
-// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
-// . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
-// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does
-// not validate the KMS key ID provided in PutBucketEncryption requests. This
-// action requires Amazon Web Services Signature Version 4. For more information,
-// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html)
-// . To use this operation, you must have permission to perform the
+// This operation is not supported by directory buckets.
+//
+// This action uses the encryption subresource to configure default encryption and
+// Amazon S3 Bucket Keys for an existing bucket.
+//
+// By default, all buckets have a default encryption configuration that uses
+// server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally
+// configure default encryption for a bucket by using server-side encryption with
+// Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption
+// with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption
+// by using SSE-KMS, you can also configure [Amazon S3 Bucket Keys]. If you use PutBucketEncryption to
+// set your [default bucket encryption] to SSE-KMS, you should verify that your KMS key ID is correct. Amazon
+// S3 does not validate the KMS key ID provided in PutBucketEncryption requests.
+//
+// This action requires Amazon Web Services Signature Version 4. For more
+// information, see [Authenticating Requests (Amazon Web Services Signature Version 4)].
+//
+// To use this operation, you must have permission to perform the
// s3:PutEncryptionConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
-// information about permissions, see Permissions Related to Bucket Subresource
-// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide. The following operations are related to
-// PutBucketEncryption :
-// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
-// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html)
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
+// The following operations are related to PutBucketEncryption :
+//
+// [GetBucketEncryption]
+//
+// [DeleteBucketEncryption]
+//
+// [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
+// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html
+// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [default bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) {
if params == nil {
params = &PutBucketEncryptionInput{}
@@ -62,8 +73,9 @@ type PutBucketEncryptionInput struct {
// (SSE-S3). You can optionally configure default encryption for a bucket by using
// server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a
// customer-provided key (SSE-C). For information about the bucket default
- // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
- // in the Amazon S3 User Guide.
+ // encryption feature, see [Amazon S3 Bucket Default Encryption] in the Amazon S3 User Guide.
+ //
+ // [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html
//
// This member is required.
Bucket *string
@@ -77,16 +89,20 @@ type PutBucketEncryptionInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The base64-encoded 128-bit MD5 digest of the server-side encryption
- // configuration. For requests made using the Amazon Web Services Command Line
- // Interface (CLI) or Amazon Web Services SDKs, this field is calculated
- // automatically.
+ // configuration.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
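// Illustrative sketch (not from the vendored patch) of setting default bucket
// encryption to SSE-KMS with PutBucketEncryption, reusing the imports from the
// earlier sketches. The bucket name and KMS key ARN are hypothetical; as the doc
// comment above notes, Amazon S3 does not validate the KMS key ID in this call.
func putExampleEncryption(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"),
				},
			}},
		},
	})
	return err
}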
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
index 61d73da569..8aac2b9871 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
@@ -14,37 +14,58 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Puts a S3
-// Intelligent-Tiering configuration to the specified bucket. You can have up to
-// 1,000 S3 Intelligent-Tiering configurations per bucket. The S3
-// Intelligent-Tiering storage class is designed to optimize storage costs by
-// automatically moving data to the most cost-effective storage access tier,
+// This operation is not supported by directory buckets.
+//
+// Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can
+// have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
+//
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
// without performance impact or operational overhead. S3 Intelligent-Tiering
// delivers automatic cost savings in three low latency and high throughput access
// tiers. To get the lowest storage cost on data that can be accessed in minutes to
-// hours, you can choose to activate additional archiving capabilities. The S3
-// Intelligent-Tiering storage class is the ideal storage class for data with
-// unknown, changing, or unpredictable access patterns, independent of object size
-// or retention period. If the size of an object is less than 128 KB, it is not
-// monitored and not eligible for auto-tiering. Smaller objects can be stored, but
-// they are always charged at the Frequent Access tier rates in the S3
-// Intelligent-Tiering storage class. For more information, see Storage class for
-// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)
-// . Operations related to PutBucketIntelligentTieringConfiguration include:
-// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
-// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
-// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html)
+// hours, you can choose to activate additional archiving capabilities.
+//
+// The S3 Intelligent-Tiering storage class is the ideal storage class for data
+// with unknown, changing, or unpredictable access patterns, independent of object
+// size or retention period. If the size of an object is less than 128 KB, it is
+// not monitored and not eligible for auto-tiering. Smaller objects can be stored,
+// but they are always charged at the Frequent Access tier rates in the S3
+// Intelligent-Tiering storage class.
+//
+// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// Operations related to PutBucketIntelligentTieringConfiguration include:
+//
+// [DeleteBucketIntelligentTieringConfiguration]
+//
+// [GetBucketIntelligentTieringConfiguration]
+//
+// [ListBucketIntelligentTieringConfigurations]
//
// You only need S3 Intelligent-Tiering enabled on a bucket if you want to
// automatically move objects stored in the S3 Intelligent-Tiering storage class to
// the Archive Access or Deep Archive Access tier.
-// PutBucketIntelligentTieringConfiguration has the following special errors: HTTP
-// 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument HTTP 400 Bad
-// Request Error Code: TooManyConfigurations Cause: You are attempting to create a
-// new configuration but have already reached the 1,000-configuration limit. HTTP
-// 403 Forbidden Error Cause: You are not the owner of the specified bucket, or you
-// do not have the s3:PutIntelligentTieringConfiguration bucket permission to set
-// the configuration on the bucket.
+//
+// PutBucketIntelligentTieringConfiguration has the following special errors:
+//
+// HTTP 400 Bad Request Error Code: InvalidArgument
+//
+// Cause: Invalid Argument
+//
+// HTTP 400 Bad Request Error Code: TooManyConfigurations
+//
+// Cause: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+//
+// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket,
+// or you do not have the s3:PutIntelligentTieringConfiguration bucket permission
+// to set the configuration on the bucket.
+//
+// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html
+// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html
func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) {
if params == nil {
params = &PutBucketIntelligentTieringConfigurationInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go
index 03d79a0d8d..5933b6480a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go
@@ -14,48 +14,76 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. This implementation of
-// the PUT action adds an inventory configuration (identified by the inventory ID)
-// to the bucket. You can have up to 1,000 inventory configurations per bucket.
+// This operation is not supported by directory buckets.
+//
+// This implementation of the PUT action adds an inventory configuration
+// (identified by the inventory ID) to the bucket. You can have up to 1,000
+// inventory configurations per bucket.
+//
// Amazon S3 inventory generates inventories of the objects in the bucket on a
// daily or weekly basis, and the results are published to a flat file. The bucket
// that is inventoried is called the source bucket, and the bucket where the
// inventory flat file is stored is called the destination bucket. The destination
-// bucket must be in the same Amazon Web Services Region as the source bucket. When
-// you configure an inventory for a source bucket, you specify the destination
-// bucket where you want the inventory to be stored, and whether to generate the
-// inventory daily or weekly. You can also configure what object metadata to
-// include and whether to inventory all object versions or only current versions.
-// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
-// in the Amazon S3 User Guide. You must create a bucket policy on the destination
-// bucket to grant permissions to Amazon S3 to write objects to the bucket in the
-// defined location. For an example policy, see Granting Permissions for Amazon S3
-// Inventory and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9)
-// . Permissions To use this operation, you must have permission to perform the
+// bucket must be in the same Amazon Web Services Region as the source bucket.
+//
+// When you configure an inventory for a source bucket, you specify the
+// destination bucket where you want the inventory to be stored, and whether to
+// generate the inventory daily or weekly. You can also configure what object
+// metadata to include and whether to inventory all object versions or only current
+// versions. For more information, see [Amazon S3 Inventory] in the Amazon S3 User Guide.
+//
+// You must create a bucket policy on the destination bucket to grant permissions
+// to Amazon S3 to write objects to the bucket in the defined location. For an
+// example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis].
+//
+// Permissions To use this operation, you must have permission to perform the
// s3:PutInventoryConfiguration action. The bucket owner has this permission by
-// default and can grant this permission to others. The
-// s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html)
-// report that includes all object metadata fields available and to specify the
+// default and can grant this permission to others.
+//
+// The s3:PutInventoryConfiguration permission allows a user to create an [S3 Inventory] report
+// that includes all object metadata fields available and to specify the
// destination bucket to store the inventory. A user with read access to objects in
// the destination bucket can also access all object metadata fields that are
-// available in the inventory report. To restrict access to an inventory report,
-// see Restricting access to an Amazon S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10)
-// in the Amazon S3 User Guide. For more information about the metadata fields
-// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents)
-// in the Amazon S3 User Guide. For more information about permissions, see
-// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide. PutBucketInventoryConfiguration has the following
-// special errors: HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid
-// Argument HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are
-// attempting to create a new configuration but have already reached the
-// 1,000-configuration limit. HTTP 403 Forbidden Error Cause: You are not the owner
-// of the specified bucket, or you do not have the s3:PutInventoryConfiguration
-// bucket permission to set the configuration on the bucket. The following
-// operations are related to PutBucketInventoryConfiguration :
-// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
-// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
-// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html)
+// available in the inventory report.
+//
+// To restrict access to an inventory report, see [Restricting access to an Amazon S3 Inventory report] in the Amazon S3 User Guide.
+// For more information about the metadata fields available in S3 Inventory, see [Amazon S3 Inventory lists]
+// in the Amazon S3 User Guide. For more information about permissions, see [Permissions related to bucket subresource operations] and [Identity and access management in Amazon S3]
+// in the Amazon S3 User Guide.
+//
+// PutBucketInventoryConfiguration has the following special errors:
+//
+// HTTP 400 Bad Request Error Code: InvalidArgument
+//
+// Cause: Invalid Argument
+//
+// HTTP 400 Bad Request Error Code: TooManyConfigurations
+//
+// Cause: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+//
+// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket,
+// or you do not have the s3:PutInventoryConfiguration bucket permission to set
+// the configuration on the bucket.
+//
+// The following operations are related to PutBucketInventoryConfiguration :
+//
+// [GetBucketInventoryConfiguration]
+//
+// [DeleteBucketInventoryConfiguration]
+//
+// [ListBucketInventoryConfigurations]
+//
+// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9
+// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html
+// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html
+// [S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html
+// [Permissions related to bucket subresource operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html
+// [Identity and access management in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Restricting access to an Amazon S3 Inventory report]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10
+// [Amazon S3 Inventory lists]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents
+// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html
func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) {
if params == nil {
params = &PutBucketInventoryConfigurationInput{}
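// Illustrative sketch (not from the vendored patch) of the
// PutBucketInventoryConfiguration call documented above, reusing the imports
// from the earlier sketches. Bucket names, the inventory ID, and the prefix are
// hypothetical, and the field shapes assume this SDK version.
func putExampleInventory(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketInventoryConfiguration(ctx, &s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("example-source-bucket"),
		Id:     aws.String("daily-inventory"),
		InventoryConfiguration: &types.InventoryConfiguration{
			Id:                     aws.String("daily-inventory"),
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: types.InventoryIncludedObjectVersionsCurrent,
			Schedule:               &types.InventorySchedule{Frequency: types.InventoryFrequencyDaily},
			Destination: &types.InventoryDestination{
				S3BucketDestination: &types.InventoryS3BucketDestination{
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
					Format: types.InventoryFormatCsv,
					Prefix: aws.String("inventory/"),
				},
			},
		},
	})
	return err
}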
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go
index 88096fdd13..102077fc73 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go
@@ -15,26 +15,32 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Creates a new lifecycle
-// configuration for the bucket or replaces an existing lifecycle configuration.
-// Keep in mind that this will overwrite an existing lifecycle configuration, so if
-// you want to retain any configuration details, they must be included in the new
-// lifecycle configuration. For information about lifecycle configuration, see
-// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
-// . Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// This operation is not supported by directory buckets.
+//
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. Keep in mind that this will overwrite an existing
+// lifecycle configuration, so if you want to retain any configuration details,
+// they must be included in the new lifecycle configuration. For information about
+// lifecycle configuration, see [Managing your storage lifecycle].
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
// an object key name prefix, one or more object tags, object size, or any
// combination of these. Accordingly, this section describes the latest API. The
// previous version of the API supported filtering based only on an object key name
// prefix, which is supported for backward compatibility. For the related API
-// description, see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
-// . Rules You specify the lifecycle configuration in your request body. The
+// description, see [PutBucketLifecycle].
+//
+// Rules You specify the lifecycle configuration in your request body. The
// lifecycle configuration is specified as XML consisting of one or more rules. An
// Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not
// adjustable. Each rule consists of the following:
+//
// - A filter identifying a subset of objects to which the rule applies. The
// filter can be based on a key name prefix, object tags, object size, or any
// combination of these.
+//
// - A status indicating whether the rule is in effect.
+//
// - One or more lifecycle transition and expiration actions that you want
// Amazon S3 to perform on the objects identified by the filter. If the state of
// your bucket is versioning-enabled or versioning-suspended, you can have many
@@ -42,28 +48,44 @@ import (
// versions). Amazon S3 provides predefined actions that you can specify for
// current and noncurrent object versions.
//
-// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
-// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html)
-// . Permissions By default, all Amazon S3 resources are private, including
-// buckets, objects, and related subresources (for example, lifecycle configuration
-// and website configuration). Only the resource owner (that is, the Amazon Web
+// For more information, see [Object Lifecycle Management] and [Lifecycle Configuration Elements].
+//
+// Permissions By default, all Amazon S3 resources are private, including buckets,
+// objects, and related subresources (for example, lifecycle configuration and
+// website configuration). Only the resource owner (that is, the Amazon Web
// Services account that created it) can access the resource. The resource owner
// can optionally grant access permissions to others by writing an access policy.
-// For this operation, a user must get the s3:PutLifecycleConfiguration
-// permission. You can also explicitly deny permissions. An explicit deny also
-// supersedes any other permissions. If you want to block users or accounts from
-// removing or deleting objects from your bucket, you must deny them permissions
-// for the following actions:
+// For this operation, a user must get the s3:PutLifecycleConfiguration permission.
+//
+// You can also explicitly deny permissions. An explicit deny also supersedes any
+// other permissions. If you want to block users or accounts from removing or
+// deleting objects from your bucket, you must deny them permissions for the
+// following actions:
+//
// - s3:DeleteObject
+//
// - s3:DeleteObjectVersion
+//
// - s3:PutLifecycleConfiguration
//
-// For more information about permissions, see Managing Access Permissions to Your
-// Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . The following operations are related to PutBucketLifecycleConfiguration :
-// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html)
-// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
-// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
+// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// The following operations are related to PutBucketLifecycleConfiguration :
+//
+// [Examples of Lifecycle Configuration]
+//
+// [GetBucketLifecycleConfiguration]
+//
+// [DeleteBucketLifecycle]
+//
+// [Examples of Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html
+// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+// [Lifecycle Configuration Elements]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html
+// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
+// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html
func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) {
if params == nil {
params = &PutBucketLifecycleConfigurationInput{}
@@ -90,10 +112,13 @@ type PutBucketLifecycleConfigurationInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The account ID of the expected bucket owner. If the account ID that you provide
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go
index fb80d2ee1b..e07c43221c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go
@@ -15,39 +15,68 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Set the logging
-// parameters for a bucket and to specify permissions for who can view and modify
-// the logging parameters. All logs are saved to buckets in the same Amazon Web
-// Services Region as the source bucket. To set the logging status of a bucket, you
-// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL
-// to all logs. You use the Grantee request element to grant access to other
-// people. The Permissions request element specifies the kind of access the
-// grantee has to the logs. If the target bucket for log delivery uses the bucket
-// owner enforced setting for S3 Object Ownership, you can't use the Grantee
-// request element to grant access to others. Permissions can only be granted using
-// policies. For more information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)
-// in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee)
-// to whom you're assigning access rights (by using request elements) in the
-// following ways:
-// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and
-// ignored in the request.
-// - By Email address: <>Grantees@email.com<> The grantee is resolved to the
-// CanonicalUser and, in a response to a GETObjectAcl request, appears as the
-// CanonicalUser.
-// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
+// This operation is not supported by directory buckets.
+//
+// Set the logging parameters for a bucket and specify permissions for who can
+// view and modify the logging parameters. All logs are saved to buckets in the
+// same Amazon Web Services Region as the source bucket. To set the logging status
+// of a bucket, you must be the bucket owner.
+//
+// The bucket owner is automatically granted FULL_CONTROL to all logs. You use the
+// Grantee request element to grant access to other people. The Permissions
+// request element specifies the kind of access the grantee has to the logs.
+//
+// If the target bucket for log delivery uses the bucket owner enforced setting
+// for S3 Object Ownership, you can't use the Grantee request element to grant
+// access to others. Permissions can only be granted using policies. For more
+// information, see [Permissions for server access log delivery] in the Amazon S3 User Guide.
+//
+// Grantee Values You can specify the person (grantee) to whom you're assigning
+// access rights (by using request elements) in the following ways:
+//
+// - By the person's ID:
+//
+// <>ID<><>GranteesEmail<>
+//
+// DisplayName is optional and ignored in the request.
+//
+// - By Email address:
+//
+// <>Grantees@email.com<>
+//
+// The grantee is resolved to the CanonicalUser and, in a response to a
+//
+// GETObjectAcl request, appears as the CanonicalUser.
+//
+// - By URI:
+//
+// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
//
// To enable logging, you use LoggingEnabled and its children request elements. To
-// disable logging, you use an empty BucketLoggingStatus request element: For
-// more information about server access logging, see Server Access Logging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html)
-// in the Amazon S3 User Guide. For more information about creating a bucket, see
-// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// . For more information about returning the logging status of a bucket, see
-// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html)
-// . The following operations are related to PutBucketLogging :
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
-// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html)
+// disable logging, you use an empty BucketLoggingStatus request element:
+//
+// For more information about server access logging, see [Server Access Logging] in the Amazon S3 User
+// Guide.
+//
+// For more information about creating a bucket, see [CreateBucket]. For more information about
+// returning the logging status of a bucket, see [GetBucketLogging].
+//
+// The following operations are related to PutBucketLogging :
+//
+// [PutObject]
+//
+// [DeleteBucket]
+//
+// [CreateBucket]
+//
+// [GetBucketLogging]
+//
+// [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [GetBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Server Access Logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html
func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) {
if params == nil {
params = &PutBucketLoggingInput{}
@@ -79,15 +108,19 @@ type PutBucketLoggingInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
- // The MD5 hash of the PutBucketLogging request body. For requests made using the
- // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs,
- // this field is calculated automatically.
+ // The MD5 hash of the PutBucketLogging request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
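// Illustrative sketch (not from the vendored patch) of enabling server access
// logging with PutBucketLogging, reusing the imports from the earlier sketches.
// Bucket names and the prefix are hypothetical; passing an empty
// BucketLoggingStatus instead disables logging, as the doc comment above notes.
func putExampleLogging(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"),
		BucketLoggingStatus: &types.BucketLoggingStatus{
			LoggingEnabled: &types.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"),
				TargetPrefix: aws.String("logs/"),
			},
		},
	})
	return err
}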
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go
index bff1452b97..10b3b5978b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go
@@ -14,29 +14,44 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets a metrics
-// configuration (specified by the metrics configuration ID) for the bucket. You
-// can have up to 1,000 metrics configurations per bucket. If you're updating an
-// existing metrics configuration, note that this is a full replacement of the
-// existing metrics configuration. If you don't include the elements you want to
-// keep, they are erased. To use this operation, you must have permissions to
-// perform the s3:PutMetricsConfiguration action. The bucket owner has this
-// permission by default. The bucket owner can grant this permission to others. For
-// more information about permissions, see Permissions Related to Bucket
-// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . For information about CloudWatch request metrics for Amazon S3, see
-// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
-// . The following operations are related to PutBucketMetricsConfiguration :
-// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
-// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
-// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+// This operation is not supported by directory buckets.
+//
+// Sets a metrics configuration (specified by the metrics configuration ID) for
+// the bucket. You can have up to 1,000 metrics configurations per bucket. If
+// you're updating an existing metrics configuration, note that this is a full
+// replacement of the existing metrics configuration. If you don't include the
+// elements you want to keep, they are erased.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutMetricsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch].
+//
+// The following operations are related to PutBucketMetricsConfiguration :
+//
+// [DeleteBucketMetricsConfiguration]
+//
+// [GetBucketMetricsConfiguration]
+//
+// [ListBucketMetricsConfigurations]
//
// PutBucketMetricsConfiguration has the following special error:
+//
// - Error code: TooManyConfigurations
+//
// - Description: You are attempting to create a new configuration but have
// already reached the 1,000-configuration limit.
+//
// - HTTP Status Code: HTTP 400 Bad Request
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html
+// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html
+// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) {
if params == nil {
params = &PutBucketMetricsConfigurationInput{}
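// Illustrative sketch (not from the vendored patch) of the
// PutBucketMetricsConfiguration call documented above, reusing the imports from
// the earlier sketches. The bucket name and configuration ID are hypothetical; a
// configuration without a filter applies to the whole bucket.
func putExampleMetrics(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketMetricsConfiguration(ctx, &s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("EntireBucket"),
		MetricsConfiguration: &types.MetricsConfiguration{
			Id: aws.String("EntireBucket"),
		},
	})
	return err
}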
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go
index e937b5c594..f83abf02b0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go
@@ -14,41 +14,59 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Enables notifications of
-// specified events for a bucket. For more information about event notifications,
-// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-// . Using this API, you can replace an existing notification configuration. The
+// This operation is not supported by directory buckets.
+//
+// Enables notifications of specified events for a bucket. For more information
+// about event notifications, see [Configuring Event Notifications].
+//
+// Using this API, you can replace an existing notification configuration. The
// configuration is an XML file that defines the event types that you want Amazon
// S3 to publish and the destination where you want Amazon S3 to publish an event
-// notification when it detects an event of the specified type. By default, your
-// bucket has no event notifications configured. That is, the notification
-// configuration will be an empty NotificationConfiguration . This action
-// replaces the existing notification configuration with the configuration you
-// include in the request body. After Amazon S3 receives this request, it first
-// verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon
-// Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner
-// has permission to publish to it by sending a test notification. In the case of
-// Lambda destinations, Amazon S3 verifies that the Lambda function permissions
-// grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For
-// more information, see Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-// . You can disable notifications by adding the empty NotificationConfiguration
-// element. For more information about the number of event notification
-// configurations that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3)
-// in Amazon Web Services General Reference. By default, only the bucket owner can
-// configure notifications on a bucket. However, bucket owners can use a bucket
-// policy to grant permission to other users to set this configuration with the
-// required s3:PutBucketNotification permission. The PUT notification is an atomic
-// operation. For example, suppose your notification configuration includes SNS
-// topic, SQS queue, and Lambda function configurations. When you send a PUT
-// request with this configuration, Amazon S3 sends test messages to your SNS
-// topic. If the message fails, the entire PUT action will fail, and Amazon S3 will
-// not add the configuration to your bucket. If the configuration in the request
-// body includes only one TopicConfiguration specifying only the
-// s3:ReducedRedundancyLostObject event type, the response will also include the
-// x-amz-sns-test-message-id header containing the message ID of the test
-// notification sent to the topic. The following action is related to
-// PutBucketNotificationConfiguration :
-// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+// notification when it detects an event of the specified type.
+//
+// By default, your bucket has no event notifications configured. That is, the
+// notification configuration will be an empty NotificationConfiguration .
+//
+// This action replaces the existing notification configuration with the
+// configuration you include in the request body.
+//
+// After Amazon S3 receives this request, it first verifies that any Amazon Simple
+// Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS)
+// destination exists, and that the bucket owner has permission to publish to it by
+// sending a test notification. In the case of Lambda destinations, Amazon S3
+// verifies that the Lambda function permissions grant Amazon S3 permission to
+// invoke the function from the Amazon S3 bucket. For more information, see [Configuring Notifications for Amazon S3 Events].
+//
+// You can disable notifications by adding the empty NotificationConfiguration
+// element.
+//
+// For more information about the number of event notification configurations that
+// you can create per bucket, see [Amazon S3 service quotas] in Amazon Web Services General Reference.
+//
+// By default, only the bucket owner can configure notifications on a bucket.
+// However, bucket owners can use a bucket policy to grant permission to other
+// users to set this configuration with the required s3:PutBucketNotification
+// permission.
+//
+// The PUT notification is an atomic operation. For example, suppose your
+// notification configuration includes SNS topic, SQS queue, and Lambda function
+// configurations. When you send a PUT request with this configuration, Amazon S3
+// sends test messages to your SNS topic. If the message fails, the entire PUT
+// action will fail, and Amazon S3 will not add the configuration to your bucket.
+//
+// If the configuration in the request body includes only one TopicConfiguration
+// specifying only the s3:ReducedRedundancyLostObject event type, the response
+// will also include the x-amz-sns-test-message-id header containing the message
+// ID of the test notification sent to the topic.
+//
+// The following action is related to PutBucketNotificationConfiguration :
+//
+// [GetBucketNotificationConfiguration]
+//
+// [Configuring Notifications for Amazon S3 Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+// [Amazon S3 service quotas]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3
+// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html
+// [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) {
if params == nil {
params = &PutBucketNotificationConfigurationInput{}
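As an illustration of the replace semantics described in the doc comment above, the following minimal sketch (editor's example, not part of the vendored diff) replaces a bucket's notification configuration with a single SQS destination. It assumes the upstream module path github.com/aws/aws-sdk-go-v2; the bucket name and queue ARN are placeholders.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // The PUT is a full replacement: sending an empty NotificationConfiguration
    // would disable notifications for the bucket entirely.
    _, err = client.PutBucketNotificationConfiguration(context.TODO(),
        &s3.PutBucketNotificationConfigurationInput{
            Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
            NotificationConfiguration: &types.NotificationConfiguration{
                QueueConfigurations: []types.QueueConfiguration{{
                    QueueArn: aws.String("arn:aws:sqs:us-west-2:123456789012:example-queue"),
                    Events:   []types.Event{types.EventS3ObjectCreated},
                }},
            },
        })
    if err != nil {
        log.Fatal(err)
    }
}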
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go
index 94875b7554..e3160fd583 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go
@@ -15,14 +15,22 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Creates or modifies
-// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have
-// the s3:PutBucketOwnershipControls permission. For more information about Amazon
-// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html)
-// . For information about Amazon S3 Object Ownership, see Using object ownership (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html)
-// . The following operations are related to PutBucketOwnershipControls :
-// - GetBucketOwnershipControls
-// - DeleteBucketOwnershipControls
+// This operation is not supported by directory buckets.
+//
+// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this
+// operation, you must have the s3:PutBucketOwnershipControls permission. For more
+// information about Amazon S3 permissions, see [Specifying permissions in a policy].
+//
+// For information about Amazon S3 Object Ownership, see [Using object ownership].
+//
+// The following operations are related to PutBucketOwnershipControls :
+//
+// # GetBucketOwnershipControls
+//
+// # DeleteBucketOwnershipControls
+//
+// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html
+// [Using object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html
func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) {
if params == nil {
params = &PutBucketOwnershipControlsInput{}
@@ -51,9 +59,10 @@ type PutBucketOwnershipControlsInput struct {
// This member is required.
OwnershipControls *types.OwnershipControls
- // The MD5 hash of the OwnershipControls request body. For requests made using the
- // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs,
- // this field is calculated automatically.
+ // The MD5 hash of the OwnershipControls request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
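A minimal sketch of the call documented above (editor's example, not part of the vendored diff). The client would be built with s3.NewFromConfig as in the earlier sketch; the bucket name is a placeholder.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// setOwnershipControls enforces bucket-owner ownership, which also disables ACLs.
func setOwnershipControls(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
        OwnershipControls: &types.OwnershipControls{
            Rules: []types.OwnershipControlsRule{{
                ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
            }},
        },
    })
    return err
}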
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go
index 88e3f2633f..c00676ca3a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go
@@ -15,48 +15,64 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. Directory buckets -
-// For directory buckets, you must make requests for this API operation to the
-// Regional endpoint. These endpoints support path-style requests in the format
-// https://s3express-control.region_code.amazonaws.com/bucket-name .
-// Virtual-hosted-style requests aren't supported. For more information, see
-// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions If you are using an identity other than
-// the root user of the Amazon Web Services account that owns the bucket, the
-// calling identity must both have the PutBucketPolicy permissions on the
-// specified bucket and belong to the bucket owner's account in order to use this
-// operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns a
-// 403 Access Denied error. If you have the correct permissions, but you're not
-// using an identity that belongs to the bucket owner's account, Amazon S3 returns
-// a 405 Method Not Allowed error. To ensure that bucket owners don't
-// inadvertently lock themselves out of their own buckets, the root principal in a
-// bucket owner's Amazon Web Services account can perform the GetBucketPolicy ,
-// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket
-// policy explicitly denies the root principal's access. Bucket owner root
-// principals can only be blocked from performing these API actions by VPC endpoint
-// policies and Amazon Web Services Organizations policies.
+// Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region_code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in
+// the Amazon S3 User Guide.
+//
+// Permissions If you are using an identity other than the root user of the Amazon
+// Web Services account that owns the bucket, the calling identity must both have
+// the PutBucketPolicy permissions on the specified bucket and belong to the
+// bucket owner's account in order to use this operation.
+//
+// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access
+// Denied error. If you have the correct permissions, but you're not using an
+// identity that belongs to the bucket owner's account, Amazon S3 returns a 405
+// Method Not Allowed error.
+//
+// To ensure that bucket owners don't inadvertently lock themselves out of their
+// own buckets, the root principal in a bucket owner's Amazon Web Services account
+// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API
+// actions, even if their bucket policy explicitly denies the root principal's
+// access. Bucket owner root principals can only be blocked from performing these
+// API actions by VPC endpoint policies and Amazon Web Services Organizations
+// policies.
+//
// - General purpose bucket permissions - The s3:PutBucketPolicy permission is
// required in a policy. For more information about general purpose buckets bucket
-// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
-// in the Amazon S3 User Guide.
+// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - To grant access to this API operation, you
// must have the s3express:PutBucketPolicy permission in an IAM identity-based
// policy instead of a bucket policy. Cross-account access to this API operation
// isn't supported. This operation can only be performed by the Amazon Web Services
// account that owns the resource. For more information about directory bucket
-// policies and permissions, see Amazon Web Services Identity and Access
-// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
-// in the Amazon S3 User Guide.
+// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide.
+//
+// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples]
+// in the Amazon S3 User Guide.
//
-// Example bucket policies General purpose buckets example bucket policies - See
-// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html)
-// in the Amazon S3 User Guide. Directory bucket example bucket policies - See
-// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
-// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The
-// HTTP Host header syntax is s3express-control.region.amazonaws.com . The
-// following operations are related to PutBucketPolicy :
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
+//
+// The following operations are related to PutBucketPolicy :
+//
+// [CreateBucket]
+//
+// [DeleteBucket]
+//
+// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) {
if params == nil {
params = &PutBucketPolicyInput{}
@@ -74,21 +90,26 @@ func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInp
type PutBucketPolicyInput struct {
- // The name of the bucket. Directory buckets - When you use this operation with a
- // directory bucket, you must use path-style requests in the format
+ // The name of the bucket.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
- // ). For information about bucket naming restrictions, see Directory bucket
- // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide
+ // ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User
+ // Guide
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
//
// This member is required.
Bucket *string
- // The bucket policy as a JSON document. For directory buckets, the only IAM
- // action supported in the bucket policy is s3express:CreateSession .
+ // The bucket policy as a JSON document.
+ //
+ // For directory buckets, the only IAM action supported in the bucket policy is
+ // s3express:CreateSession .
//
// This member is required.
Policy *string
@@ -97,39 +118,54 @@ type PutBucketPolicyInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3
- // fails the request with the HTTP status code 400 Bad Request . For the
- // x-amz-checksum-algorithm header, replace algorithm with the supported
- // algorithm from the following list:
+ // fails the request with the HTTP status code 400 Bad Request .
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the
+ // supported algorithm from the following list:
+ //
// - CRC32
+ //
// - CRC32C
+ //
// - SHA1
+ //
// - SHA256
- // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If the individual checksum value you provide
- // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set
- // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided
- // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the
- // provided value in x-amz-checksum-algorithm . For directory buckets, when you
- // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's
- // used for performance.
+ //
+ // For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through
+ // x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter and uses the checksum algorithm that matches the provided value in
+ // x-amz-checksum-algorithm .
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// Set this parameter to true to confirm that you want to remove your permissions
- // to change this bucket policy in the future. This functionality is not supported
- // for directory buckets.
+ // to change this bucket policy in the future.
+ //
+ // This functionality is not supported for directory buckets.
ConfirmRemoveSelfBucketAccess *bool
- // The MD5 hash of the request body. For requests made using the Amazon Web
- // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
- // calculated automatically. This functionality is not supported for directory
- // buckets.
+ // The MD5 hash of the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // This functionality is not supported for directory buckets.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
- // status code 403 Forbidden (access denied). For directory buckets, this header
- // is not supported in this API operation. If you specify this header, the request
- // fails with the HTTP status code 501 Not Implemented .
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
ExpectedBucketOwner *string
noSmithyDocumentSerde
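To make the permission discussion above concrete, here is a minimal sketch (editor's example, not part of the vendored diff) that applies a simple read-only policy to a general purpose bucket. The account ID, bucket name, and policy statement are placeholders.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

// putReadOnlyPolicy grants s3:GetObject on every object in the bucket to a
// single (placeholder) account. The policy document is passed as a JSON string.
func putReadOnlyPolicy(ctx context.Context, client *s3.Client) error {
    policy := `{
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
        "Action": "s3:GetObject",
        "Resource": "arn:aws:s3:::DOC-EXAMPLE-BUCKET/*"
      }]
    }`
    _, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
        Policy: aws.String(policy),
    })
    return err
}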
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go
index bf59164c03..00182a6bfb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go
@@ -15,47 +15,71 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Creates a replication
-// configuration or replaces an existing one. For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
-// in the Amazon S3 User Guide. Specify the replication configuration in the
-// request body. In the replication configuration, you provide the name of the
-// destination bucket or buckets where you want Amazon S3 to replicate objects, the
-// IAM role that Amazon S3 can assume to replicate objects on your behalf, and
-// other relevant information. You can invoke this request for a specific Amazon
-// Web Services Region by using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion)
-// condition key. A replication configuration must include at least one rule, and
-// can contain a maximum of 1,000. Each rule identifies a subset of objects to
-// replicate by filtering the objects in the source bucket. To choose additional
-// subsets of objects to replicate, add a rule for each subset. To specify a subset
-// of the objects in the source bucket to apply a replication rule to, add the
-// Filter element as a child of the Rule element. You can filter objects based on
-// an object key prefix, one or more object tags, or both. When you add the Filter
-// element in the configuration, you must also add the following elements:
-// DeleteMarkerReplication , Status , and Priority . If you are using an earlier
-// version of the replication configuration, Amazon S3 handles replication of
-// delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations)
-// . For information about enabling versioning on a bucket, see Using Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html)
-// . Handling Replication of Encrypted Objects By default, Amazon S3 doesn't
+// This operation is not supported by directory buckets.
+//
+// Creates a replication configuration or replaces an existing one. For more
+// information, see [Replication]in the Amazon S3 User Guide.
+//
+// Specify the replication configuration in the request body. In the replication
+// configuration, you provide the name of the destination bucket or buckets where
+// you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume
+// to replicate objects on your behalf, and other relevant information. You can
+// invoke this request for a specific Amazon Web Services Region by using the
+// [aws:RequestedRegion] condition key.
+//
+// A replication configuration must include at least one rule, and can contain a
+// maximum of 1,000. Each rule identifies a subset of objects to replicate by
+// filtering the objects in the source bucket. To choose additional subsets of
+// objects to replicate, add a rule for each subset.
+//
+// To specify a subset of the objects in the source bucket to apply a replication
+// rule to, add the Filter element as a child of the Rule element. You can filter
+// objects based on an object key prefix, one or more object tags, or both. When
+// you add the Filter element in the configuration, you must also add the following
+// elements: DeleteMarkerReplication , Status , and Priority .
+//
+// If you are using an earlier version of the replication configuration, Amazon S3
+// handles replication of delete markers differently. For more information, see [Backward Compatibility].
+//
+// For information about enabling versioning on a bucket, see [Using Versioning].
+//
+// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't
// replicate objects that are stored at rest using server-side encryption with KMS
// keys. To replicate Amazon Web Services KMS-encrypted objects, add the following:
// SourceSelectionCriteria , SseKmsEncryptedObjects , Status ,
// EncryptionConfiguration , and ReplicaKmsKeyID . For information about
-// replication configuration, see Replicating Objects Created with SSE Using KMS
-// keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html)
-// . For information on PutBucketReplication errors, see List of
-// replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList)
+// replication configuration, see [Replicating Objects Created with SSE Using KMS keys].
+//
+// For information on PutBucketReplication errors, see [List of replication-related error codes]
+//
// Permissions To create a PutBucketReplication request, you must have
-// s3:PutReplicationConfiguration permissions for the bucket. By default, a
-// resource owner, in this case the Amazon Web Services account that created the
-// bucket, can perform this operation. The resource owner can also grant others
-// permissions to perform the operation. For more information about permissions,
-// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . To perform this operation, the user or role performing the action must have
-// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html)
-// permission. The following operations are related to PutBucketReplication :
-// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html)
-// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+// s3:PutReplicationConfiguration permissions for the bucket.
+//
+// By default, a resource owner, in this case the Amazon Web Services account that
+// created the bucket, can perform this operation. The resource owner can also
+// grant others permissions to perform the operation. For more information about
+// permissions, see [Specifying Permissions in a Policy]and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// To perform this operation, the user or role performing the action must have the [iam:PassRole]
+// permission.
+//
+// The following operations are related to PutBucketReplication :
+//
+// [GetBucketReplication]
+//
+// [DeleteBucketReplication]
+//
+// [iam:PassRole]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html
+// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html
+// [aws:RequestedRegion]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion
+// [Replicating Objects Created with SSE Using KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html
+// [Using Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html
+// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList
+// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations
+// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) {
if params == nil {
params = &PutBucketReplicationInput{}
@@ -88,17 +112,23 @@ type PutBucketReplicationInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The base64-encoded 128-bit MD5 digest of the data. You must use this header as
// a message integrity check to verify that the request body was not corrupted in
- // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt)
- // . For requests made using the Amazon Web Services Command Line Interface (CLI)
- // or Amazon Web Services SDKs, this field is calculated automatically.
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
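A minimal sketch of the replication configuration described above (editor's example, not part of the vendored diff). Because the rule uses a Filter, it also sets Priority, Status, and DeleteMarkerReplication, as the doc comment requires. The role ARN and bucket names are placeholders, and the ReplicationRuleFilterMemberPrefix union member matches the SDK version vendored in this diff; newer SDK releases may model the filter differently.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putReplication replicates objects under the "logs/" prefix to a destination bucket.
func putReplication(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
        Bucket: aws.String("DOC-EXAMPLE-SOURCE-BUCKET"),
        ReplicationConfiguration: &types.ReplicationConfiguration{
            // IAM role that Amazon S3 assumes to replicate objects on your behalf.
            Role: aws.String("arn:aws:iam::123456789012:role/example-replication-role"),
            Rules: []types.ReplicationRule{{
                Status:   types.ReplicationRuleStatusEnabled,
                Priority: aws.Int32(1),
                Filter:   &types.ReplicationRuleFilterMemberPrefix{Value: "logs/"},
                DeleteMarkerReplication: &types.DeleteMarkerReplication{
                    Status: types.DeleteMarkerReplicationStatusDisabled,
                },
                Destination: &types.Destination{
                    Bucket: aws.String("arn:aws:s3:::DOC-EXAMPLE-DESTINATION-BUCKET"),
                },
            }},
        },
    })
    return err
}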
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go
index 07e0f16399..57992d9d3a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go
@@ -15,14 +15,22 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets the request payment
-// configuration for a bucket. By default, the bucket owner pays for downloads from
-// the bucket. This configuration parameter enables the bucket owner (only) to
-// specify that the person requesting the download will be charged for the
-// download. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html)
-// . The following operations are related to PutBucketRequestPayment :
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html)
+// This operation is not supported by directory buckets.
+//
+// Sets the request payment configuration for a bucket. By default, the bucket
+// owner pays for downloads from the bucket. This configuration parameter enables
+// the bucket owner (only) to specify that the person requesting the download will
+// be charged for the download. For more information, see [Requester Pays Buckets].
+//
+// The following operations are related to PutBucketRequestPayment :
+//
+// [CreateBucket]
+//
+// [GetBucketRequestPayment]
+//
+// [GetBucketRequestPayment]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html
+// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) {
if params == nil {
params = &PutBucketRequestPaymentInput{}
@@ -54,17 +62,23 @@ type PutBucketRequestPaymentInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The base64-encoded 128-bit MD5 digest of the data. You must use this header as
// a message integrity check to verify that the request body was not corrupted in
- // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt)
- // . For requests made using the Amazon Web Services Command Line Interface (CLI)
- // or Amazon Web Services SDKs, this field is calculated automatically.
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
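A minimal sketch (editor's example, not part of the vendored diff) that switches the request-payment setting documented above to Requester; the bucket name is a placeholder.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableRequesterPays makes downloaders, not the bucket owner, pay for requests.
func enableRequesterPays(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketRequestPayment(ctx, &s3.PutBucketRequestPaymentInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
        RequestPaymentConfiguration: &types.RequestPaymentConfiguration{
            Payer: types.PayerRequester,
        },
    })
    return err
}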
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
index 0f0a6fd406..7a97414e5b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
@@ -15,39 +15,54 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets the tags for a
-// bucket. Use tags to organize your Amazon Web Services bill to reflect your own
-// cost structure. To do this, sign up to get your Amazon Web Services account bill
-// with tag key values included. Then, to see the cost of combined resources,
-// organize your billing information according to resources with the same tag key
-// values. For example, you can tag several resources with a specific application
-// name, and then organize your billing information to see the total cost of that
-// application across several services. For more information, see Cost Allocation
-// and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
-// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html)
-// . When this operation sets the tags for a bucket, it will overwrite any current
+// This operation is not supported by directory buckets.
+//
+// Sets the tags for a bucket.
+//
+// Use tags to organize your Amazon Web Services bill to reflect your own cost
+// structure. To do this, sign up to get your Amazon Web Services account bill with
+// tag key values included. Then, to see the cost of combined resources, organize
+// your billing information according to resources with the same tag key values.
+// For example, you can tag several resources with a specific application name, and
+// then organize your billing information to see the total cost of that application
+// across several services. For more information, see [Cost Allocation and Tagging]and [Using Cost Allocation in Amazon S3 Bucket Tags].
+//
+// When this operation sets the tags for a bucket, it will overwrite any current
// tags the bucket already has. You cannot use this operation to add tags to an
-// existing list of tags. To use this operation, you must have permissions to
-// perform the s3:PutBucketTagging action. The bucket owner has this permission by
-// default and can grant this permission to others. For more information about
-// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// . PutBucketTagging has the following special errors. For more Amazon S3 errors
-// see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
-// .
+// existing list of tags.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutBucketTagging action. The bucket owner has this permission by default and
+// can grant this permission to others. For more information about permissions, see
+// [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// PutBucketTagging has the following special errors. For more Amazon S3 errors,
+// see [Error Responses].
+//
// - InvalidTag - The tag provided was not a valid tag. This error can occur if
-// the tag did not pass input validation. For more information, see Using Cost
-// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html)
-// .
+// the tag did not pass input validation. For more information, see [Using Cost Allocation in Amazon S3 Bucket Tags].
+//
// - MalformedXML - The XML provided does not match the schema.
+//
// - OperationAborted - A conflicting conditional action is currently in progress
// against this resource. Please try again.
+//
// - InternalError - The service was unable to apply the provided tag to the
// bucket.
//
// The following operations are related to PutBucketTagging :
-// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html)
-// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html)
+//
+// [GetBucketTagging]
+//
+// [DeleteBucketTagging]
+//
+// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html
+// [Cost Allocation and Tagging]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html
+// [Using Cost Allocation in Amazon S3 Bucket Tags]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) {
if params == nil {
params = &PutBucketTaggingInput{}
@@ -79,17 +94,23 @@ type PutBucketTaggingInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The base64-encoded 128-bit MD5 digest of the data. You must use this header as
// a message integrity check to verify that the request body was not corrupted in
- // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt)
- // . For requests made using the Amazon Web Services Command Line Interface (CLI)
- // or Amazon Web Services SDKs, this field is calculated automatically.
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
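A minimal sketch of the tagging call documented above (editor's example, not part of the vendored diff); bucket name and tag values are placeholders.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// replaceBucketTags overwrites the bucket's entire tag set, per the doc comment
// above; there is no append operation for bucket tags.
func replaceBucketTags(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
        Tagging: &types.Tagging{
            TagSet: []types.Tag{
                {Key: aws.String("team"), Value: aws.String("observability")},
                {Key: aws.String("cost-center"), Value: aws.String("1234")},
            },
        },
    })
    return err
}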
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
index 495725cea9..9a5d520a79 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
@@ -15,28 +15,47 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets the versioning state
-// of an existing bucket. You can set the versioning state with one of the
-// following values: Enabled—Enables versioning for the objects in the bucket. All
-// objects added to the bucket receive a unique version ID. Suspended—Disables
-// versioning for the objects in the bucket. All objects added to the bucket
-// receive the version ID null. If the versioning state has never been set on a
-// bucket, it has no versioning state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
-// request does not return a versioning state value. In order to enable MFA Delete,
-// you must be the bucket owner. If you are the bucket owner and want to enable MFA
-// Delete in the bucket versioning configuration, you must include the x-amz-mfa
-// request header and the Status and the MfaDelete request elements in a request
-// to set the versioning state of the bucket. If you have an object expiration
-// lifecycle configuration in your non-versioned bucket and you want to maintain
-// the same permanent delete behavior when you enable versioning, you must add a
-// noncurrent expiration policy. The noncurrent expiration lifecycle configuration
-// will manage the deletes of the noncurrent object versions in the version-enabled
-// bucket. (A version-enabled bucket maintains one current and zero or more
-// noncurrent object versions.) For more information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config)
-// . The following operations are related to PutBucketVersioning :
-// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
-// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
+// This operation is not supported by directory buckets.
+//
+// Sets the versioning state of an existing bucket.
+//
+// You can set the versioning state with one of the following values:
+//
+// Enabled—Enables versioning for the objects in the bucket. All objects added to
+// the bucket receive a unique version ID.
+//
+// Suspended—Disables versioning for the objects in the bucket. All objects added
+// to the bucket receive the version ID null.
+//
+// If the versioning state has never been set on a bucket, it has no versioning
+// state; a [GetBucketVersioning]request does not return a versioning state value.
+//
+// In order to enable MFA Delete, you must be the bucket owner. If you are the
+// bucket owner and want to enable MFA Delete in the bucket versioning
+// configuration, you must include the x-amz-mfa request header and the Status and
+// the MfaDelete request elements in a request to set the versioning state of the
+// bucket.
+//
+// If you have an object expiration lifecycle configuration in your non-versioned
+// bucket and you want to maintain the same permanent delete behavior when you
+// enable versioning, you must add a noncurrent expiration policy. The noncurrent
+// expiration lifecycle configuration will manage the deletes of the noncurrent
+// object versions in the version-enabled bucket. (A version-enabled bucket
+// maintains one current and zero or more noncurrent object versions.) For more
+// information, see [Lifecycle and Versioning].
+//
+// The following operations are related to PutBucketVersioning :
+//
+// [CreateBucket]
+//
+// [DeleteBucket]
+//
+// [GetBucketVersioning]
+//
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Lifecycle and Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config
+// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) {
if params == nil {
params = &PutBucketVersioningInput{}
@@ -68,17 +87,23 @@ type PutBucketVersioningInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The base64-encoded 128-bit MD5 digest of the data. You must use this header as
// a message integrity check to verify that the request body was not corrupted in
- // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt)
- // . For requests made using the Amazon Web Services Command Line Interface (CLI)
- // or Amazon Web Services SDKs, this field is calculated automatically.
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
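A minimal sketch of the versioning call documented above (editor's example, not part of the vendored diff); the bucket name is a placeholder, and MFA Delete is left unset.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableVersioning sets the bucket's versioning state to Enabled; pass
// types.BucketVersioningStatusSuspended to suspend versioning instead.
func enableVersioning(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
        VersioningConfiguration: &types.VersioningConfiguration{
            Status: types.BucketVersioningStatusEnabled,
        },
    })
    return err
}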
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
index 08c8a582f5..85d0214ab9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
@@ -15,21 +15,29 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets the configuration of
-// the website that is specified in the website subresource. To configure a bucket
-// as a website, you can add this subresource on the bucket with website
-// configuration information such as the file name of the index document and any
-// redirect rules. For more information, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
-// . This PUT action requires the S3:PutBucketWebsite permission. By default, only
+// This operation is not supported by directory buckets.
+//
+// Sets the configuration of the website that is specified in the website
+// subresource. To configure a bucket as a website, you can add this subresource on
+// the bucket with website configuration information such as the file name of the
+// index document and any redirect rules. For more information, see [Hosting Websites on Amazon S3].
+//
+// This PUT action requires the S3:PutBucketWebsite permission. By default, only
// the bucket owner can configure the website attached to a bucket; however, bucket
// owners can allow other users to set the website configuration by writing a
-// bucket policy that grants them the S3:PutBucketWebsite permission. To redirect
-// all website requests sent to the bucket's website endpoint, you add a website
-// configuration with the following elements. Because all requests are sent to
-// another website, you don't need to provide index document name for the bucket.
+// bucket policy that grants them the S3:PutBucketWebsite permission.
+//
+// To redirect all website requests sent to the bucket's website endpoint, you add
+// a website configuration with the following elements. Because all requests are
+// sent to another website, you don't need to provide index document name for the
+// bucket.
+//
// - WebsiteConfiguration
+//
// - RedirectAllRequestsTo
+//
// - HostName
+//
// - Protocol
//
// If you want granular control over redirects, you can use the following elements
@@ -37,27 +45,47 @@ import (
// information about the redirect destination. In this case, the website
// configuration must provide an index document for the bucket, because some
// requests might not be redirected.
+//
// - WebsiteConfiguration
+//
// - IndexDocument
+//
// - Suffix
+//
// - ErrorDocument
+//
// - Key
+//
// - RoutingRules
+//
// - RoutingRule
+//
// - Condition
+//
// - HttpErrorCodeReturnedEquals
+//
// - KeyPrefixEquals
+//
// - Redirect
+//
// - Protocol
+//
// - HostName
+//
// - ReplaceKeyPrefixWith
+//
// - ReplaceKeyWith
+//
// - HttpRedirectCode
//
// Amazon S3 has a limitation of 50 routing rules per website configuration. If
// you require more than 50 routing rules, you can use object redirect. For more
-// information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
-// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB.
+// information, see [Configuring an Object Redirect]in the Amazon S3 User Guide.
+//
+// The maximum request length is limited to 128 KB.
+//
+// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+// [Configuring an Object Redirect]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html
func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) {
if params == nil {
params = &PutBucketWebsiteInput{}
@@ -89,17 +117,23 @@ type PutBucketWebsiteInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The base64-encoded 128-bit MD5 digest of the data. You must use this header as
// a message integrity check to verify that the request body was not corrupted in
- // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt)
- // . For requests made using the Amazon Web Services Command Line Interface (CLI)
- // or Amazon Web Services SDKs, this field is calculated automatically.
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
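A minimal sketch of the redirect-all configuration described above (editor's example, not part of the vendored diff). It uses the WebsiteConfiguration, RedirectAllRequestsTo, HostName, and Protocol elements listed in the doc comment; no index document is needed in this mode. Bucket and host names are placeholders.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// redirectAllRequests sends every request to the bucket's website endpoint
// to another host over HTTPS.
func redirectAllRequests(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketWebsite(ctx, &s3.PutBucketWebsiteInput{
        Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
        WebsiteConfiguration: &types.WebsiteConfiguration{
            RedirectAllRequestsTo: &types.RedirectAllRequestsTo{
                HostName: aws.String("example.com"),
                Protocol: types.ProtocolHttps,
            },
        },
    })
    return err
}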
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
index d57e0026ef..6a47996063 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
@@ -18,51 +18,60 @@ import (
)
// Adds an object to a bucket.
+//
// - Amazon S3 never adds partial objects; if you receive a success response,
// Amazon S3 added the entire object to the bucket. You cannot use PutObject to
// only update a single piece of metadata for an existing object. You must put the
// entire object with updated metadata if you want to update some values.
+//
// - If your bucket uses the bucket owner enforced setting for Object Ownership,
// ACLs are disabled and no longer affect permissions. All objects written to the
// bucket by any account will be owned by the bucket owner.
+//
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Zonal endpoint. These endpoints support
// virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name .
-// Path-style requests are not supported. For more information, see Regional and
-// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide.
+// Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the
+// Amazon S3 User Guide.
//
// Amazon S3 is a distributed system. If it receives multiple write requests for
// the same object simultaneously, it overwrites all but the last object written.
// However, Amazon S3 provides features that can modify this behavior:
+//
// - S3 Object Lock - To prevent objects from being deleted or overwritten, you
-// can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html)
-// in the Amazon S3 User Guide. This functionality is not supported for directory
-// buckets.
+// can use [Amazon S3 Object Lock]in the Amazon S3 User Guide.
+//
+// This functionality is not supported for directory buckets.
+//
// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3
// receives multiple write requests for the same object simultaneously, it stores
// all versions of the objects. For each write request that is made to the same
// object, Amazon S3 automatically generates a unique version ID of that object
// being stored in Amazon S3. You can retrieve, replace, or delete any version of
-// the object. For more information about versioning, see Adding Objects to
-// Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html)
-// in the Amazon S3 User Guide. For information about returning the versioning
-// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
-// . This functionality is not supported for directory buckets.
+// the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User
+// Guide. For information about returning the versioning state of a bucket, see
+// [GetBucketVersioning].
+//
+// This functionality is not supported for directory buckets.
//
// Permissions
+//
// - General purpose bucket permissions - The following permissions are required
// in your policies when your PutObject request includes specific headers.
+//
// - s3:PutObject - To successfully complete the PutObject request, you must
// always have the s3:PutObject permission on a bucket to add an object to it.
+//
// - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject
// request, you must have the s3:PutObjectAcl .
+//
// - s3:PutObjectTagging - To successfully set the tag-set with your PutObject
// request, you must have the s3:PutObjectTagging .
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -70,24 +79,36 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession].
//
// Data integrity with Content-MD5
+//
// - General purpose bucket - To ensure that data is not corrupted traversing
// the network, use the Content-MD5 header. When you use this header, Amazon S3
// checks the object against the provided MD5 value and, if they do not match,
// Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5
// digest, you can calculate the MD5 while putting the object to Amazon S3 and
// compare the returned ETag to the calculated MD5 value.
+//
// - Directory bucket - This functionality is not supported for directory
// buckets.
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . For more information about
-// related Amazon S3 APIs, see the following:
-// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
-// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+// HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// For more information about related Amazon S3 APIs, see the following:
+//
+// [CopyObject]
+//
+// [DeleteObject]
+//
+// [Amazon S3 Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) {
if params == nil {
params = &PutObjectInput{}
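Taken together, the documentation above reduces to a small amount of calling code. A minimal sketch (the bucket name, key, and body are placeholders, and real code would add retries and richer error handling):

package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Load credentials and region from the default sources (environment, shared config, ...).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Upload a small object; Body accepts any io.Reader.
	out, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder bucket name
		Key:    aws.String("notes/hello.txt"),     // placeholder key
		Body:   strings.NewReader("hello, world"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// For simple, unencrypted uploads to general purpose buckets the ETag is the MD5 digest;
	// VersionId is only populated when bucket versioning is enabled.
	fmt.Println(aws.ToString(out.ETag), aws.ToString(out.VersionId))
}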
@@ -105,31 +126,39 @@ func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns .
type PutObjectInput struct {
- // The bucket name to which the PUT action was initiated. Directory buckets - When
- // you use this operation with a directory bucket, you must use
- // virtual-hosted-style requests in the format
+ // The bucket name to which the PUT action was initiated.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
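Because the SDK accepts an ARN wherever a bucket name is expected, routing a PutObject through an access point only changes the Bucket value. A sketch with a made-up account ID, access point name, and key:

package s3examples

import (
	"context"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putViaAccessPoint uploads through an access point by passing its ARN in place of
// the bucket name. All identifiers below are placeholders.
func putViaAccessPoint(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:    aws.String("reports/2024-01.csv"),
		Body:   strings.NewReader("col1,col2\n1,2\n"),
	})
	return err
}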
@@ -139,26 +168,33 @@ type PutObjectInput struct {
// This member is required.
Key *string
- // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
- // in the Amazon S3 User Guide. When adding a new object, you can use headers to
- // grant ACL-based permissions to individual Amazon Web Services accounts or to
- // predefined groups defined by Amazon S3. These permissions are then added to the
- // ACL on the object. By default, all objects are private. Only the owner has full
- // access control. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
- // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html)
- // in the Amazon S3 User Guide. If the bucket that you're uploading objects to uses
- // the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and
- // no longer affect permissions. Buckets that use this setting only accept PUT
- // requests that don't specify an ACL or PUT requests that specify bucket owner
- // full control ACLs, such as the bucket-owner-full-control canned ACL or an
- // equivalent form of this ACL expressed in the XML format. PUT requests that
- // contain other ACLs (for example, custom grants to certain Amazon Web Services
- // accounts) fail and return a 400 error with the error code
- // AccessControlListNotSupported . For more information, see Controlling
- // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
- // in the Amazon S3 User Guide.
+ // The canned ACL to apply to the object. For more information, see [Canned ACL] in the Amazon
+ // S3 User Guide.
+ //
+ // When adding a new object, you can use headers to grant ACL-based permissions to
+ // individual Amazon Web Services accounts or to predefined groups defined by
+ // Amazon S3. These permissions are then added to the ACL on the object. By
+ // default, all objects are private. Only the owner has full access control. For
+ // more information, see [Access Control List (ACL) Overview] and [Managing ACLs Using the REST API] in the Amazon S3 User Guide.
+ //
+ // If the bucket that you're uploading objects to uses the bucket owner enforced
+ // setting for S3 Object Ownership, ACLs are disabled and no longer affect
+ // permissions. Buckets that use this setting only accept PUT requests that don't
+ // specify an ACL or PUT requests that specify bucket owner full control ACLs, such
+ // as the bucket-owner-full-control canned ACL or an equivalent form of this ACL
+ // expressed in the XML format. PUT requests that contain other ACLs (for example,
+ // custom grants to certain Amazon Web Services accounts) fail and return a 400
+ // error with the error code AccessControlListNotSupported . For more information,
+ // see [Controlling ownership of objects and disabling ACLs] in the Amazon S3 User Guide.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Managing ACLs Using the REST API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
ACL types.ObjectCannedACL
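For a bucket that uses the bucket owner enforced setting, the only PUT requests with an ACL that are accepted are, as described above, those carrying a bucket owner full control ACL. A sketch of such a request, with placeholder names:

package s3examples

import (
	"context"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithCannedACL uploads an object with the bucket-owner-full-control canned ACL,
// which is still accepted when S3 Object Ownership is set to bucket owner enforced.
func putWithCannedACL(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:    aws.String("shared/report.txt"),   // placeholder
		Body:   strings.NewReader("data"),
		ACL:    types.ObjectCannedACLBucketOwnerFullControl,
	})
	return err
}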
// Object data.
@@ -167,102 +203,124 @@ type PutObjectInput struct {
// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
// with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
// Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object
- // encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect
- // bucket-level settings for S3 Bucket Key. This functionality is not supported for
- // directory buckets.
+ // encryption with SSE-KMS.
+ //
+ // Specifying this header with a PUT action doesn’t affect bucket-level settings
+ // for S3 Bucket Key.
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// Can be used to specify caching behavior along the request/reply chain. For more
- // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9)
- // .
+ // information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9].
+ //
+ // [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
CacheControl *string
// Indicates the algorithm used to create the checksum for the object when you use
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3
- // fails the request with the HTTP status code 400 Bad Request . For the
- // x-amz-checksum-algorithm header, replace algorithm with the supported
- // algorithm from the following list:
+ // fails the request with the HTTP status code 400 Bad Request .
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the
+ // supported algorithm from the following list:
+ //
// - CRC32
+ //
// - CRC32C
+ //
// - SHA1
+ //
// - SHA256
- // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If the individual checksum value you provide
- // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set
- // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided
- // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the
- // provided value in x-amz-checksum-algorithm . For directory buckets, when you
- // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's
- // used for performance.
+ //
+ // For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through
+ // x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter and uses the checksum algorithm that matches the provided value in
+ // x-amz-checksum-algorithm .
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
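When only ChecksumAlgorithm is set, the SDK is expected to compute the matching x-amz-checksum-* value for the request body (with CRC32 as the default for directory buckets, per the note above). A sketch requesting SHA-256 validation; the bucket and key are placeholders:

package s3examples

import (
	"context"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithChecksum asks S3 to validate the upload with a SHA-256 checksum. With only
// the algorithm set, the SDK computes the checksum itself; a precomputed value could
// instead be supplied in ChecksumSHA256.
func putWithChecksum(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:               aws.String("data/payload.bin"),    // placeholder
		Body:              strings.NewReader("payload bytes"),
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	return err
}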
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32C *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA1 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA256 *string
- // Specifies presentational information for the object. For more information, see
- // https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4)
- // .
+ // Specifies presentational information for the object. For more information, see [https://www.rfc-editor.org/rfc/rfc6266#section-4].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc6266#section-4]: https://www.rfc-editor.org/rfc/rfc6266#section-4
ContentDisposition *string
// Specifies what content encodings have been applied to the object and thus what
// decoding mechanisms must be applied to obtain the media-type referenced by the
- // Content-Type header field. For more information, see
- // https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding)
- // .
+ // Content-Type header field. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding
ContentEncoding *string
// The language the content is in.
ContentLanguage *string
// Size of the body in bytes. This parameter is useful when the size of the body
- // cannot be determined automatically. For more information, see
- // https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length)
- // .
+ // cannot be determined automatically. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length
ContentLength *int64
// The base64-encoded 128-bit MD5 digest of the message (without the headers)
// according to RFC 1864. This header can be used as a message integrity check to
// verify that the data is the same data that was originally sent. Although it is
// optional, we recommend using the Content-MD5 mechanism as an end-to-end
- // integrity check. For more information about REST request authentication, see
- // REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
- // . The Content-MD5 header is required for any request to upload an object with a
+ // integrity check. For more information about REST request authentication, see [REST Authentication].
+ //
+ // The Content-MD5 header is required for any request to upload an object with a
// retention period configured using Amazon S3 Object Lock. For more information
- // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // about Amazon S3 Object Lock, see [Amazon S3 Object Lock Overview] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+ // [Amazon S3 Object Lock Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html
ContentMD5 *string
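A common way to populate ContentMD5 is to hash the payload immediately before the call, as sketched below with placeholder bucket and key names:

package s3examples

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putWithContentMD5 sends the base64-encoded MD5 digest of the body in Content-MD5 so
// that S3 can reject a corrupted upload. This header is required when the object has
// an Object Lock retention period configured.
func putWithContentMD5(ctx context.Context, client *s3.Client, body []byte) error {
	sum := md5.Sum(body)
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:     aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:        aws.String("locked/object.txt"),   // placeholder
		Body:       bytes.NewReader(body),
		ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	return err
}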
// A standard MIME type describing the format of the contents. For more
- // information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type)
- // .
+ // information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type
ContentType *string
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -271,27 +329,36 @@ type PutObjectInput struct {
ExpectedBucketOwner *string
// The date and time at which the object is no longer cacheable. For more
- // information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3)
- // .
+ // information, see [https://www.rfc-editor.org/rfc/rfc7234#section-5.3].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3
Expires *time.Time
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string
// Allows grantee to read the object data and its metadata.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string
// Allows grantee to read the object ACL.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string
// Allows grantee to write the ACL for the applicable object.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string
@@ -299,46 +366,55 @@ type PutObjectInput struct {
Metadata map[string]string
// Specifies whether a legal hold will be applied to this object. For more
- // information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // information about S3 Object Lock, see [Object Lock] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
- // The Object Lock mode that you want to apply to this object. This functionality
- // is not supported for directory buckets.
+ // The Object Lock mode that you want to apply to this object.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode types.ObjectLockMode
// The date and time when you want this object's Object Lock to expire. Must be
- // formatted as a timestamp parameter. This functionality is not supported for
- // directory buckets.
+ // formatted as a timestamp parameter.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Specifies the algorithm to use when encrypting the object (for example, AES256
- // ). This functionality is not supported for directory buckets.
+ // Specifies the algorithm to use when encrypting the object (for example, AES256 ).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
// encrypting data. This value is used to store the object and then it is
// discarded; Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
- // x-amz-server-side-encryption-customer-algorithm header. This functionality is
- // not supported for directory buckets.
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error. This functionality is not
- // supported for directory buckets.
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// Specifies the Amazon Web Services KMS Encryption Context to use for object
@@ -346,8 +422,9 @@ type PutObjectInput struct {
// JSON with the encryption context key-value pairs. This value is stored as object
// metadata and automatically gets passed on to Amazon Web Services KMS for future
// GetObject or CopyObject operations on this object. This value must be
- // explicitly added during CopyObject operations. This functionality is not
- // supported for directory buckets.
+ // explicitly added during CopyObject operations.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSEncryptionContext *string
// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse ,
@@ -358,53 +435,72 @@ type PutObjectInput struct {
// x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web
// Services managed key ( aws/s3 ) to protect the data. If the KMS key does not
// exist in the same account that's issuing the command, you must use the full ARN
- // and not just the ID. This functionality is not supported for directory buckets.
+ // and not just the ID.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm that was used when you store this object
- // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). General purpose
- // buckets - You have four mutually exclusive options to protect data using
- // server-side encryption in Amazon S3, depending on how you choose to manage the
- // encryption keys. Specifically, the encryption key options are Amazon S3 managed
- // keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and
- // customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side
+ // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ).
+ //
+ // General purpose buckets - You have four mutually exclusive options to protect
+ // data using server-side encryption in Amazon S3, depending on how you choose to
+ // manage the encryption keys. Specifically, the encryption key options are Amazon
+ // S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS),
+ // and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side
// encryption by using Amazon S3 managed keys (SSE-S3) by default. You can
// optionally tell Amazon S3 to encrypt data at rest by using server-side
- // encryption with other key options. For more information, see Using Server-Side
- // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
- // in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the
- // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) value is
- // supported.
+ // encryption with other key options. For more information, see [Using Server-Side Encryption] in the Amazon S3
+ // User Guide.
+ //
+ // Directory buckets - For directory buckets, only the server-side encryption with
+ // Amazon S3 managed keys (SSE-S3) ( AES256 ) value is supported.
+ //
+ // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
ServerSideEncryption types.ServerSideEncryption
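Combining the SSE-KMS related fields described above might look like the following sketch; the bucket, key, and KMS key ARN are placeholders:

package s3examples

import (
	"context"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithSSEKMS encrypts the object with a customer managed KMS key and enables the
// S3 Bucket Key for this object. The key ARN is a placeholder.
func putWithSSEKMS(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:               aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:                  aws.String("secure/secret.txt"),   // placeholder
		Body:                 strings.NewReader("sensitive data"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:123456789012:key/11111111-2222-3333-4444-555555555555"),
		BucketKeyEnabled:     aws.Bool(true),
	})
	return err
}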
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high
// availability. Depending on performance needs, you can specify a different
- // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
- // in the Amazon S3 User Guide.
+ // Storage Class. For more information, see [Storage Classes] in the Amazon S3 User Guide.
+ //
// - For directory buckets, only the S3 Express One Zone storage class is
// supported to store newly created objects.
+ //
// - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
StorageClass types.StorageClass
// The tag-set for the object. The tag-set must be encoded as URL Query
- // parameters. (For example, "Key1=Value1") This functionality is not supported for
- // directory buckets.
+ // parameters. (For example, "Key1=Value1")
+ //
+ // This functionality is not supported for directory buckets.
Tagging *string
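Since Tagging must be URL-query encoded, net/url can produce the expected "Key1=Value1&..." form; a sketch with placeholder tag keys and values:

package s3examples

import (
	"context"
	"net/url"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putWithTags encodes the object's tag-set as URL query parameters before upload.
func putWithTags(ctx context.Context, client *s3.Client) error {
	tags := url.Values{}
	tags.Set("Key1", "Value1") // placeholder tags
	tags.Set("env", "dev")
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:  aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:     aws.String("tagged/object.txt"),   // placeholder
		Body:    strings.NewReader("data"),
		Tagging: aws.String(tags.Encode()),
	})
	return err
}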
// If the bucket is configured as a website, redirects requests for this object to
// another object in the same bucket or to an external URL. Amazon S3 stores the
// value of this header in the object metadata. For information about object
- // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html)
- // in the Amazon S3 User Guide. In the following example, the request header sets
- // the redirect to an object (anotherPage.html) in the same bucket:
- // x-amz-website-redirect-location: /anotherPage.html In the following example, the
- // request header sets the object redirect to another website:
- // x-amz-website-redirect-location: http://www.example.com/ For more information
- // about website hosting in Amazon S3, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
- // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // metadata, see [Object Key and Metadata] in the Amazon S3 User Guide.
+ //
+ // In the following example, the request header sets the redirect to an object
+ // (anotherPage.html) in the same bucket:
+ //
+ // x-amz-website-redirect-location: /anotherPage.html
+ //
+ // In the following example, the request header sets the object redirect to
+ // another website:
+ //
+ // x-amz-website-redirect-location: http://www.example.com/
+ //
+ // For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3] and [How to Configure Website Page Redirects] in the
+ // Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [How to Configure Website Page Redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html
+ // [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+ // [Object Key and Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
WebsiteRedirectLocation *string
noSmithyDocumentSerde
@@ -419,8 +515,9 @@ func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) {
type PutObjectOutput struct {
// Indicates whether the uploaded object uses an S3 Bucket Key for server-side
- // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
- // is not supported for directory buckets.
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
@@ -428,8 +525,10 @@ type PutObjectOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
@@ -437,8 +536,10 @@ type PutObjectOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
@@ -446,8 +547,10 @@ type PutObjectOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
@@ -455,71 +558,89 @@ type PutObjectOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA256 *string
- // Entity tag for the uploaded object. General purpose buckets - To ensure that
- // data is not corrupted traversing the network, for objects where the ETag is the
- // MD5 digest of the object, you can calculate the MD5 while putting an object to
- // Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory
- // buckets - The ETag for the object in a directory bucket isn't the MD5 digest of
- // the object.
+ // Entity tag for the uploaded object.
+ //
+ // General purpose buckets - To ensure that data is not corrupted traversing the
+ // network, for objects where the ETag is the MD5 digest of the object, you can
+ // calculate the MD5 while putting an object to Amazon S3 and compare the returned
+ // ETag to the calculated MD5 value.
+ //
+ // Directory buckets - The ETag for the object in a directory bucket isn't the MD5
+ // digest of the object.
ETag *string
- // If the expiration is configured for the object (see
- // PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
- // ) in the Amazon S3 User Guide, the response includes this header. It includes
- // the expiry-date and rule-id key-value pairs that provide information about
- // object expiration. The value of the rule-id is URL-encoded. This functionality
- // is not supported for directory buckets.
+ // If the expiration is configured for the object (see [PutBucketLifecycleConfiguration]) in the Amazon S3 User
+ // Guide, the response includes this header. It includes the expiry-date and
+ // rule-id key-value pairs that provide information about object expiration. The
+ // value of the rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
Expiration *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to confirm the encryption
- // algorithm that's used. This functionality is not supported for directory
- // buckets.
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide the round-trip
- // message integrity verification of the customer-provided encryption key. This
- // functionality is not supported for directory buckets.
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, indicates the Amazon Web Services KMS Encryption Context to use for
// object encryption. The value of this header is a base64-encoded UTF-8 string
// holding JSON with the encryption context key-value pairs. This value is stored
// as object metadata and automatically gets passed on to Amazon Web Services KMS
- // for future GetObject or CopyObject operations on this object. This
- // functionality is not supported for directory buckets.
+ // for future GetObject or CopyObject operations on this object.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSEncryptionContext *string
// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse ,
// this header indicates the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. This functionality
- // is not supported for directory buckets.
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when you store this object in Amazon
- // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only
- // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is
- // supported.
+ // S3 (for example, AES256 , aws:kms , aws:kms:dsse ).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
- // Version ID of the object. If you enable versioning for a bucket, Amazon S3
- // automatically generates a unique version ID for the object being stored. Amazon
- // S3 returns this ID in the response. When you enable versioning for a bucket, if
- // Amazon S3 receives multiple write requests for the same object simultaneously,
- // it stores all of the objects. For more information about versioning, see Adding
- // Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html)
- // in the Amazon S3 User Guide. For information about returning the versioning
- // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
- // . This functionality is not supported for directory buckets.
+ // Version ID of the object.
+ //
+ // If you enable versioning for a bucket, Amazon S3 automatically generates a
+ // unique version ID for the object being stored. Amazon S3 returns this ID in the
+ // response. When you enable versioning for a bucket, if Amazon S3 receives
+ // multiple write requests for the same object simultaneously, it stores all of the
+ // objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets] in the Amazon S3 User
+ // Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
+ // [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
VersionId *string
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go
index 08fea12c1b..bebccdced7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go
@@ -14,87 +14,152 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Uses the acl subresource
-// to set the access control list (ACL) permissions for a new or existing object in
-// an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an
-// object. For more information, see What permissions can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions)
-// in the Amazon S3 User Guide. This functionality is not supported for Amazon S3
-// on Outposts. Depending on your application needs, you can choose to set the ACL
-// on an object using either the request body or the headers. For example, if you
-// have an existing application that updates a bucket ACL using the request body,
-// you can continue to use that approach. For more information, see Access Control
-// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
-// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced
-// setting for S3 Object Ownership, ACLs are disabled and no longer affect
-// permissions. You must use policies to grant access to your bucket and the
-// objects in it. Requests to set ACLs or update ACLs fail and return the
-// AccessControlListNotSupported error code. Requests to read ACLs are still
-// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide. Permissions You can set access permissions using
-// one of the following methods:
+// This operation is not supported by directory buckets.
+//
+// Uses the acl subresource to set the access control list (ACL) permissions for a
+// new or existing object in an S3 bucket. You must have the WRITE_ACP permission
+// to set the ACL of an object. For more information, see [What permissions can I grant?] in the Amazon S3 User
+// Guide.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// Depending on your application needs, you can choose to set the ACL on an object
+// using either the request body or the headers. For example, if you have an
+// existing application that updates a bucket ACL using the request body, you can
+// continue to use that approach. For more information, see [Access Control List (ACL) Overview] in the Amazon S3 User
+// Guide.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// ACLs are disabled and no longer affect permissions. You must use policies to
+// grant access to your bucket and the objects in it. Requests to set ACLs or
+// update ACLs fail and return the AccessControlListNotSupported error code.
+// Requests to read ACLs are still supported. For more information, see [Controlling object ownership] in the
+// Amazon S3 User Guide.
+//
+// Permissions
+//
+// You can set access permissions using one of the following methods:
+//
// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a
// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined
// set of grantees and permissions. Specify the canned ACL name as the value of
// x-amz-acl. If you use this header, you cannot use other access
-// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
-// .
+// control-specific headers in your request. For more information, see [Canned ACL].
+//
// - Specify access permissions explicitly with the x-amz-grant-read ,
// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control
// headers. When using these headers, you specify explicit access permissions and
// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
// permission. If you use these ACL-specific headers, you cannot use x-amz-acl
// header to set a canned ACL. These parameters map to the set of permissions that
-// Amazon S3 supports in an ACL. For more information, see Access Control List
-// (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
-// . You specify each grantee as a type=value pair, where the type is one of the
-// following:
-// - id – if the value specified is the canonical user ID of an Amazon Web
-// Services account
-// - uri – if you are granting permissions to a predefined group
-// - emailAddress – if the value specified is the email address of an Amazon Web
-// Services account Using email addresses to specify a grantee is only supported in
-// the following Amazon Web Services Regions:
-// - US East (N. Virginia)
-// - US West (N. California)
-// - US West (Oregon)
-// - Asia Pacific (Singapore)
-// - Asia Pacific (Sydney)
-// - Asia Pacific (Tokyo)
-// - Europe (Ireland)
-// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
-// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
-// in the Amazon Web Services General Reference. For example, the following
-// x-amz-grant-read header grants list objects permission to the two Amazon Web
-// Services accounts identified by their email addresses. x-amz-grant-read:
-// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
+// Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview].
+//
+// You specify each grantee as a type=value pair, where the type is one of the
+// following:
+//
+// - id – if the value specified is the canonical user ID of an Amazon Web
+// Services account
+//
+// - uri – if you are granting permissions to a predefined group
+//
+// - emailAddress – if the value specified is the email address of an Amazon Web
+// Services account
+//
+// Using email addresses to specify a grantee is only supported in the following
+// Amazon Web Services Regions:
+//
+// - US East (N. Virginia)
+//
+// - US West (N. California)
+//
+// - US West (Oregon)
+//
+// - Asia Pacific (Singapore)
+//
+// - Asia Pacific (Sydney)
+//
+// - Asia Pacific (Tokyo)
+//
+// - Europe (Ireland)
+//
+// - South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+// Amazon Web Services General Reference.
+//
+// For example, the following x-amz-grant-read header grants list objects
+// permission to the two Amazon Web Services accounts identified by their email
+// addresses.
+//
+// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
//
// You can use either a canned ACL or specify access permissions explicitly. You
-// cannot do both. Grantee Values You can specify the person (grantee) to whom
-// you're assigning access rights (using request elements) in the following ways:
-// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and
-// ignored in the request.
-// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
-// - By Email address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved
-// to the CanonicalUser and, in a response to a GET Object acl request, appears as
-// the CanonicalUser. Using email addresses to specify a grantee is only supported
-// in the following Amazon Web Services Regions:
-// - US East (N. Virginia)
-// - US West (N. California)
-// - US West (Oregon)
-// - Asia Pacific (Singapore)
-// - Asia Pacific (Sydney)
-// - Asia Pacific (Tokyo)
-// - Europe (Ireland)
-// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
-// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
-// in the Amazon Web Services General Reference.
+// cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// - By the person's ID:
+//
+// <>ID<><>GranteesEmail<>
+//
+// DisplayName is optional and ignored in the request.
+//
+// - By URI:
+//
+// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
+//
+// - By Email address:
+//
+// <>Grantees@email.com<></Grantee>
+//
+// The grantee is resolved to the CanonicalUser and, in a response to a GET Object
+// acl request, appears as the CanonicalUser.
+//
+// Using email addresses to specify a grantee is only supported in the following
+// Amazon Web Services Regions:
+//
+// - US East (N. Virginia)
+//
+// - US West (N. California)
+//
+// - US West (Oregon)
+//
+// - Asia Pacific (Singapore)
+//
+// - Asia Pacific (Sydney)
+//
+// - Asia Pacific (Tokyo)
+//
+// - Europe (Ireland)
+//
+// - South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+// Amazon Web Services General Reference.
//
// Versioning The ACL of an object is set at the object version level. By default,
// PUT sets the ACL of the current version of an object. To set the ACL of a
-// different version, use the versionId subresource. The following operations are
-// related to PutObjectAcl :
-// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+// different version, use the versionId subresource.
+//
+// The following operations are related to PutObjectAcl :
+//
+// [CopyObject]
+//
+// [GetObject]
+//
+// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+// [What permissions can I grant?]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) {
if params == nil {
params = &PutObjectAclInput{}
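Condensing the PutObjectAcl documentation above into code, a sketch that applies a canned ACL to an existing object (bucket and key are placeholders; a request may use either a canned ACL or the explicit grant headers, never both):

package s3examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// setObjectACL replaces the ACL of the current object version with the public-read
// canned ACL. The explicit x-amz-grant-* headers (GrantRead, GrantWriteACP, ...) are
// the alternative when finer-grained grants are needed.
func setObjectACL(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:    aws.String("shared/report.txt"),   // placeholder
		ACL:    types.ObjectCannedACLPublicRead,
	})
	return err
}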
@@ -113,6 +178,7 @@ func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, op
type PutObjectAclInput struct {
// The bucket name that contains the object to which you want to attach the ACL.
+ //
// Access points - When you use this action with an access point, you must provide
// the alias of the access point in place of the bucket name or specify the access
// point ARN. When using the access point ARN, you must direct requests to the
@@ -120,15 +186,18 @@ type PutObjectAclInput struct {
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with
- // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
- // The S3 on Outposts hostname takes the form
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -138,8 +207,9 @@ type PutObjectAclInput struct {
// This member is required.
Key *string
- // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
- // .
+ // The canned ACL to apply to the object. For more information, see [Canned ACL].
+ //
+ // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
ACL types.ObjectCannedACL
// Contains the elements that set the ACL permissions for an object per grantee.
@@ -149,17 +219,23 @@ type PutObjectAclInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The base64-encoded 128-bit MD5 digest of the data. This header must be used as
// a message integrity check to verify that the request body was not corrupted in
- // transit. For more information, go to RFC 1864.> (http://www.ietf.org/rfc/rfc1864.txt)
+ // transit. For more information, go to [RFC 1864].
+ //
// For requests made using the Amazon Web Services Command Line Interface (CLI) or
// Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -168,38 +244,47 @@ type PutObjectAclInput struct {
ExpectedBucketOwner *string
// Allows grantee the read, write, read ACP, and write ACP permissions on the
- // bucket. This functionality is not supported for Amazon S3 on Outposts.
+ // bucket.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string
- // Allows grantee to list the objects in the bucket. This functionality is not
- // supported for Amazon S3 on Outposts.
+ // Allows grantee to list the objects in the bucket.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string
- // Allows grantee to read the bucket ACL. This functionality is not supported for
- // Amazon S3 on Outposts.
+ // Allows grantee to read the bucket ACL.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string
- // Allows grantee to create new objects in the bucket. For the bucket and object
- // owners of existing objects, also allows deletions and overwrites of those
- // objects.
+ // Allows grantee to create new objects in the bucket.
+ //
+ // For the bucket and object owners of existing objects, also allows deletions and
+ // overwrites of those objects.
GrantWrite *string
- // Allows grantee to write the ACL for the applicable bucket. This functionality
- // is not supported for Amazon S3 on Outposts.
+ // Allows grantee to write the ACL for the applicable bucket.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Version ID used to reference a specific version of the object. This
- // functionality is not supported for directory buckets.
+ // Version ID used to reference a specific version of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
noSmithyDocumentSerde
@@ -214,7 +299,9 @@ func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) {
type PutObjectAclOutput struct {
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
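As a minimal sketch of how the PutObjectAcl input described above is typically populated with the v2 SDK: this is not taken from the collector itself, it assumes the upstream module path github.com/aws/aws-sdk-go-v2, and the bucket and key names are hypothetical placeholders; region and credentials come from the default configuration chain.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    ctx := context.Background()

    // Region and credentials are resolved from the default chain.
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Apply a canned ACL to an existing object; bucket and key are placeholders.
    _, err = client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("reports/example-object.txt"),
        ACL:    types.ObjectCannedACLBucketOwnerFullControl,
    })
    if err != nil {
        log.Fatal(err)
    }
}

The grant headers (GrantRead, GrantWriteACP, and so on) or an AccessControlPolicy body could be set instead of a canned ACL, but only one of the two styles should be used per request.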
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go
index cc23509f8b..98f3dcbf86 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go
@@ -14,9 +14,14 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Applies a legal hold
-// configuration to the specified object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
-// . This functionality is not supported for Amazon S3 on Outposts.
+// This operation is not supported by directory buckets.
+//
+// Applies a legal hold configuration to the specified object. For more
+// information, see [Locking Objects].
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) {
if params == nil {
params = &PutObjectLegalHoldInput{}
@@ -35,6 +40,7 @@ func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalH
type PutObjectLegalHoldInput struct {
// The bucket name containing the object that you want to place a legal hold on.
+ //
// Access points - When you use this action with an access point, you must provide
// the alias of the access point in place of the bucket name or specify the access
// point ARN. When using the access point ARN, you must direct requests to the
@@ -42,8 +48,9 @@ type PutObjectLegalHoldInput struct {
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -57,15 +64,19 @@ type PutObjectLegalHoldInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
- // The MD5 hash for the request body. For requests made using the Amazon Web
- // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
- // calculated automatically.
+ // The MD5 hash for the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -81,10 +92,12 @@ type PutObjectLegalHoldInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// The version ID of the object that you want to place a legal hold on.
@@ -101,7 +114,9 @@ func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) {
type PutObjectLegalHoldOutput struct {
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
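A brief sketch of placing a legal hold as documented above, following the same pattern as the earlier PutObjectAcl example; bucket and key are placeholders and the import path is the upstream module path rather than the vendored one.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Turn the legal hold on for one object version (the current version here,
    // since no VersionId is given). The bucket must have Object Lock enabled.
    _, err = client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{
        Bucket: aws.String("example-lock-bucket"),
        Key:    aws.String("evidence/example-object.txt"),
        LegalHold: &types.ObjectLockLegalHold{
            Status: types.ObjectLockLegalHoldStatusOn,
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}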
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go
index 358ececc6d..0eb948a965 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go
@@ -14,17 +14,22 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Places an Object Lock
-// configuration on the specified bucket. The rule specified in the Object Lock
-// configuration will be applied by default to every new object placed in the
-// specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
-// .
+// This operation is not supported by directory buckets.
+//
+// Places an Object Lock configuration on the specified bucket. The rule specified
+// in the Object Lock configuration will be applied by default to every new object
+// placed in the specified bucket. For more information, see [Locking Objects].
+//
// - The DefaultRetention settings require both a mode and a period.
+//
// - The DefaultRetention period can be either Days or Years but you must select
// one. You cannot specify Days and Years at the same time.
+//
// - You can enable Object Lock for new or existing buckets. For more
-// information, see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html)
-// .
+// information, see [Configuring Object Lock].
+//
+// [Configuring Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) {
if params == nil {
params = &PutObjectLockConfigurationInput{}
@@ -51,15 +56,19 @@ type PutObjectLockConfigurationInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
- // The MD5 hash for the request body. For requests made using the Amazon Web
- // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
- // calculated automatically.
+ // The MD5 hash for the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -74,10 +83,12 @@ type PutObjectLockConfigurationInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// A token to allow Object Lock to be enabled for an existing bucket.
@@ -94,7 +105,9 @@ func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParamet
type PutObjectLockConfigurationOutput struct {
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
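The doc comment above notes that a DefaultRetention rule needs both a mode and a period, and either Days or Years but not both. A hedged sketch of a conforming request follows; the bucket name is a placeholder, and the pointer-valued Days field (aws.Int32) matches recent SDK versions.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Default every new object in the bucket to 30 days of governance-mode
    // retention. Days and Years cannot both be set on the same rule.
    _, err = client.PutObjectLockConfiguration(ctx, &s3.PutObjectLockConfigurationInput{
        Bucket: aws.String("example-lock-bucket"),
        ObjectLockConfiguration: &types.ObjectLockConfiguration{
            ObjectLockEnabled: types.ObjectLockEnabledEnabled,
            Rule: &types.ObjectLockRule{
                DefaultRetention: &types.DefaultRetention{
                    Mode: types.ObjectLockRetentionModeGovernance,
                    Days: aws.Int32(30),
                },
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}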
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go
index eb787de489..d3c6ed4f5a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go
@@ -14,12 +14,16 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Places an Object
-// Retention configuration on an object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
-// . Users or accounts require the s3:PutObjectRetention permission in order to
-// place an Object Retention configuration on objects. Bypassing a Governance
+// This operation is not supported by directory buckets.
+//
+// Places an Object Retention configuration on an object. For more information,
+// see [Locking Objects]. Users or accounts require the s3:PutObjectRetention permission in order
+// to place an Object Retention configuration on objects. Bypassing a Governance
// Retention configuration requires the s3:BypassGovernanceRetention permission.
+//
// This functionality is not supported for Amazon S3 on Outposts.
+//
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) {
if params == nil {
params = &PutObjectRetentionInput{}
@@ -38,15 +42,18 @@ func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetent
type PutObjectRetentionInput struct {
// The bucket name that contains the object you want to apply this Object
- // Retention configuration to. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
+ // Retention configuration to.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -64,15 +71,19 @@ type PutObjectRetentionInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
- // The MD5 hash for the request body. For requests made using the Amazon Web
- // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
- // calculated automatically.
+ // The MD5 hash for the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -84,10 +95,12 @@ type PutObjectRetentionInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// The container element for the Object Retention configuration.
@@ -108,7 +121,9 @@ func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) {
type PutObjectRetentionOutput struct {
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
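A minimal sketch of the retention call described above, assuming the caller holds s3:PutObjectRetention; the bucket, key, and 30-day window are illustrative only.

package main

import (
    "context"
    "log"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Keep the current object version locked in governance mode for 30 days.
    // Shortening an existing governance-mode lock would additionally require
    // s3:BypassGovernanceRetention and the BypassGovernanceRetention flag.
    _, err = client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
        Bucket: aws.String("example-lock-bucket"),
        Key:    aws.String("evidence/example-object.txt"),
        Retention: &types.ObjectLockRetention{
            Mode:            types.ObjectLockRetentionModeGovernance,
            RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}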
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go
index 2768db5025..6c43900c4b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go
@@ -14,35 +14,50 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Sets the supplied tag-set
-// to an object that already exists in a bucket. A tag is a key-value pair. For
-// more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html)
-// . You can associate tags with an object by sending a PUT request against the
+// This operation is not supported by directory buckets.
+//
+// Sets the supplied tag-set to an object that already exists in a bucket. A tag
+// is a key-value pair. For more information, see [Object Tagging].
+//
+// You can associate tags with an object by sending a PUT request against the
// tagging subresource that is associated with the object. You can retrieve tags by
-// sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
-// . For tagging-related restrictions related to characters and encodings, see Tag
-// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
-// . Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
+// sending a GET request. For more information, see [GetObjectTagging].
+//
+// For tagging-related restrictions related to characters and encodings, see [Tag Restrictions].
+// Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
+//
// To use this operation, you must have permission to perform the
// s3:PutObjectTagging action. By default, the bucket owner has this permission and
-// can grant this permission to others. To put tags of any other version, use the
-// versionId query parameter. You also need permission for the
-// s3:PutObjectVersionTagging action. PutObjectTagging has the following special
-// errors. For more Amazon S3 errors see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
-// .
+// can grant this permission to others.
+//
+// To put tags of any other version, use the versionId query parameter. You also
+// need permission for the s3:PutObjectVersionTagging action.
+//
+// PutObjectTagging has the following special errors. For more Amazon S3 errors,
+// see [Error Responses].
+//
// - InvalidTag - The tag provided was not a valid tag. This error can occur if
-// the tag did not pass input validation. For more information, see Object
-// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html)
-// .
+// the tag did not pass input validation. For more information, see [Object Tagging].
+//
// - MalformedXML - The XML provided does not match the schema.
+//
// - OperationAborted - A conflicting conditional action is currently in progress
// against this resource. Please try again.
+//
// - InternalError - The service was unable to apply the provided tag to the
// object.
//
// The following operations are related to PutObjectTagging :
-// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
-// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
+//
+// [GetObjectTagging]
+//
+// [DeleteObjectTagging]
+//
+// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
+// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html
+// [Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html
+// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) {
if params == nil {
params = &PutObjectTaggingInput{}
@@ -60,23 +75,27 @@ func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingI
type PutObjectTaggingInput struct {
- // The bucket name containing the object. Access points - When you use this action
- // with an access point, you must provide the alias of the access point in place of
- // the bucket name or specify the access point ARN. When using the access point
- // ARN, you must direct requests to the access point hostname. The access point
- // hostname takes the form
+ // The bucket name containing the object.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with
- // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
- // The S3 on Outposts hostname takes the form
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -95,15 +114,19 @@ type PutObjectTaggingInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
- // The MD5 hash for the request body. For requests made using the Amazon Web
- // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
- // calculated automatically.
+ // The MD5 hash for the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -115,10 +138,12 @@ type PutObjectTaggingInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// The versionId of the object that the tag-set will be added to.
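The operation doc above explains that a tag-set (up to 10 key-value pairs) replaces whatever tags the object already has. A hedged sketch of sending that PUT against the tagging subresource through the SDK, with placeholder bucket, key, and tag values:

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // The TagSet sent here replaces the object's existing tag-set entirely.
    _, err = client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("reports/example-object.txt"),
        Tagging: &types.Tagging{
            TagSet: []types.Tag{
                {Key: aws.String("project"), Value: aws.String("demo")},
                {Key: aws.String("team"), Value: aws.String("observability")},
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}

To tag a non-current version, the VersionId field would be set as well, which also requires the s3:PutObjectVersionTagging permission mentioned above.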
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
index 7e6d0788e4..009c3a3177 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
@@ -15,22 +15,38 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Creates or modifies the
-// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation,
-// you must have the s3:PutBucketPublicAccessBlock permission. For more
-// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or
-// an object, it checks the PublicAccessBlock configuration for both the bucket
-// (or the bucket that contains the object) and the bucket owner's account. If the
+// This operation is not supported by directory buckets.
+//
+// Creates or modifies the PublicAccessBlock configuration for an Amazon S3
+// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
+// permission. For more information about Amazon S3 permissions, see [Specifying Permissions in a Policy].
+//
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an
+// object, it checks the PublicAccessBlock configuration for both the bucket (or
+// the bucket that contains the object) and the bucket owner's account. If the
// PublicAccessBlock configurations are different between the bucket and the
// account, Amazon S3 uses the most restrictive combination of the bucket-level and
-// account-level settings. For more information about when Amazon S3 considers a
-// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
-// . The following operations are related to PutPublicAccessBlock :
-// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html)
-// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
-// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html)
-// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+// account-level settings.
+//
+// For more information about when Amazon S3 considers a bucket or an object
+// public, see [The Meaning of "Public"].
+//
+// The following operations are related to PutPublicAccessBlock :
+//
+// [GetPublicAccessBlock]
+//
+// [DeletePublicAccessBlock]
+//
+// [GetBucketPolicyStatus]
+//
+// [Using Amazon S3 Block Public Access]
+//
+// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) {
if params == nil {
params = &PutPublicAccessBlockInput{}
@@ -56,9 +72,10 @@ type PutPublicAccessBlockInput struct {
// The PublicAccessBlock configuration that you want to apply to this Amazon S3
// bucket. You can enable the configuration options in any combination. For more
- // information about when Amazon S3 considers a bucket or object public, see The
- // Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
- // in the Amazon S3 User Guide.
+ // information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"] in
+ // the Amazon S3 User Guide.
+ //
+ // [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
//
// This member is required.
PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration
@@ -67,15 +84,19 @@ type PutPublicAccessBlockInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
- // The MD5 hash of the PutPublicAccessBlock request body. For requests made using
- // the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services
- // SDKs, this field is calculated automatically.
+ // The MD5 hash of the PutPublicAccessBlock request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
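A short sketch of applying the most restrictive PublicAccessBlock combination described above; the bucket name is a placeholder, and the pointer-valued boolean fields (aws.Bool) match recent SDK versions.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Block all public access at the bucket level; the effective setting is the
    // most restrictive combination of this and the account-level configuration.
    _, err = client.PutPublicAccessBlock(ctx, &s3.PutPublicAccessBlockInput{
        Bucket: aws.String("example-bucket"),
        PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
            BlockPublicAcls:       aws.Bool(true),
            IgnorePublicAcls:      aws.Bool(true),
            BlockPublicPolicy:     aws.Bool(true),
            RestrictPublicBuckets: aws.Bool(true),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}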
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
index 3b6aad85b8..18bb6d03b9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
@@ -14,40 +14,51 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// This operation is not supported by directory buckets. Restores an archived copy
-// of an object back into Amazon S3 This functionality is not supported for Amazon
-// S3 on Outposts. This action performs the following types of requests:
+// This operation is not supported by directory buckets.
+//
+// Restores an archived copy of an object back into Amazon S3.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// This action performs the following types of requests:
+//
// - restore an archive - Restore an archived object
//
// For more information about the S3 structure in the request body, see the
// following:
-// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
-// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
-// in the Amazon S3 User Guide
-// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
-// in the Amazon S3 User Guide
+//
+// [PutObject]
+//
+// [Managing Access with ACLs] in the Amazon S3 User Guide
+//
+// [Protecting Data Using Server-Side Encryption] in the Amazon S3 User Guide
//
// Permissions To use this operation, you must have permissions to perform the
// s3:RestoreObject action. The bucket owner has this permission by default and can
-// grant this permission to others. For more information about permissions, see
-// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide. Restoring objects Objects that you archive to the
-// S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive
-// storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep
-// Archive tiers, are not accessible in real time. For objects in the S3 Glacier
-// Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage
-// classes, you must first initiate a restore request, and then wait until a
-// temporary copy of the object is available. If you want a permanent copy of the
-// object, create a copy of it in the Amazon S3 Standard storage class in your S3
-// bucket. To access an archived object, you must restore the object for the
-// duration (number of days) that you specify. For objects in the Archive Access or
-// Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a
-// restore request, and then wait until the object is moved into the Frequent
-// Access tier. To restore a specific object version, you can provide a version ID.
-// If you don't provide a version ID, Amazon S3 restores the current version. When
-// restoring an archived object, you can specify one of the following data access
-// tier options in the Tier element of the request body:
+// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations]
+// and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
+// Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval
+// or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or
+// S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For
+// objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage
+// classes, you must first initiate a restore request, and then wait until a
+// temporary copy of the object is available. If you want a permanent copy of the
+// object, create a copy of it in the Amazon S3 Standard storage class in your S3
+// bucket. To access an archived object, you must restore the object for the
+// duration (number of days) that you specify. For objects in the Archive Access or
+// Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a
+// restore request, and then wait until the object is moved into the Frequent
+// Access tier.
+//
+// To restore a specific object version, you can provide a version ID. If you
+// don't provide a version ID, Amazon S3 restores the current version.
+//
+// When restoring an archived object, you can specify one of the following data
+// access tier options in the Tier element of the request body:
+//
// - Expedited - Expedited retrievals allow you to quickly access your data
// stored in the S3 Glacier Flexible Retrieval storage class or
// S3 Intelligent-Tiering Archive tier when occasional urgent requests for
@@ -57,6 +68,7 @@ import (
// Expedited retrievals is available when you need it. Expedited retrievals and
// provisioned capacity are not available for objects stored in the S3 Glacier Deep
// Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
+//
// - Standard - Standard retrievals allow you to access any of your archived
// objects within several hours. This is the default option for retrieval requests
// that do not specify the retrieval option. Standard retrievals typically finish
@@ -65,6 +77,7 @@ import (
// typically finish within 12 hours for objects stored in the S3 Glacier Deep
// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard
// retrievals are free for objects stored in S3 Intelligent-Tiering.
+//
// - Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible
// Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve
// large amounts, even petabytes, of data at no cost. Bulk retrievals typically
@@ -76,29 +89,33 @@ import (
// Deep Archive tier.
//
// For more information about archive retrieval options and provisioned capacity
-// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
-// in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to
-// change the restore speed to a faster speed while it is in progress. For more
-// information, see Upgrading the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
-// in the Amazon S3 User Guide. To get the status of object restoration, you can
-// send a HEAD request. Operations return the x-amz-restore header, which provides
-// information about the restoration status, in the response. You can use Amazon S3
-// event notifications to notify you when a restore is initiated or completed. For
-// more information, see Configuring Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-// in the Amazon S3 User Guide. After restoring an archived object, you can update
-// the restoration period by reissuing the request with a new period. Amazon S3
-// updates the restoration period relative to the current time and charges only for
-// the request-there are no data transfer charges. You cannot update the
-// restoration period when Amazon S3 is actively processing your current restore
-// request for the object. If your bucket has a lifecycle configuration with a rule
-// that includes an expiration action, the object expiration overrides the life
-// span that you specify in a restore request. For example, if you restore an
-// object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon
-// S3 deletes the object in 3 days. For more information about lifecycle
-// configuration, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
-// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
-// in Amazon S3 User Guide. Responses A successful action returns either the 200 OK
-// or 202 Accepted status code.
+// for Expedited data access, see [Restoring Archived Objects] in the Amazon S3 User Guide.
+//
+// You can use Amazon S3 restore speed upgrade to change the restore speed to a
+// faster speed while it is in progress. For more information, see [Upgrading the speed of an in-progress restore] in the Amazon
+// S3 User Guide.
+//
+// To get the status of object restoration, you can send a HEAD request.
+// Operations return the x-amz-restore header, which provides information about
+// the restoration status, in the response. You can use Amazon S3 event
+// notifications to notify you when a restore is initiated or completed. For more
+// information, see [Configuring Amazon S3 Event Notifications] in the Amazon S3 User Guide.
+//
+// After restoring an archived object, you can update the restoration period by
+// reissuing the request with a new period. Amazon S3 updates the restoration
+// period relative to the current time and charges only for the request; there are
+// no data transfer charges. You cannot update the restoration period when Amazon
+// S3 is actively processing your current restore request for the object.
+//
+// If your bucket has a lifecycle configuration with a rule that includes an
+// expiration action, the object expiration overrides the life span that you
+// specify in a restore request. For example, if you restore an object copy for 10
+// days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the
+// object in 3 days. For more information about lifecycle configuration, see [PutBucketLifecycleConfiguration] and [Object Lifecycle Management]
+// in the Amazon S3 User Guide.
+//
+// Responses A successful action returns either the 200 OK or 202 Accepted status
+// code.
//
// - If the object is not previously restored, then Amazon S3 returns 202
// Accepted in the response.
@@ -128,8 +145,22 @@ import (
// - SOAP Fault Code Prefix: N/A
//
// The following operations are related to RestoreObject :
-// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
-// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+//
+// [PutBucketLifecycleConfiguration]
+//
+// [GetBucketNotificationConfiguration]
+//
+// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Configuring Amazon S3 Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+// [Managing Access with ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
+// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Upgrading the speed of an in-progress restore]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html
func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) {
if params == nil {
params = &RestoreObjectInput{}
@@ -147,23 +178,27 @@ func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput,
type RestoreObjectInput struct {
- // The bucket name containing the object to restore. Access points - When you use
- // this action with an access point, you must provide the alias of the access point
- // in place of the bucket name or specify the access point ARN. When using the
- // access point ARN, you must direct requests to the access point hostname. The
- // access point hostname takes the form
+ // The bucket name containing the object to restore.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
- // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with
- // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
- // The S3 on Outposts hostname takes the form
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -177,10 +212,13 @@ type RestoreObjectInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -192,10 +230,12 @@ type RestoreObjectInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// Container for restore job parameters.
@@ -215,7 +255,9 @@ func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) {
type RestoreObjectOutput struct {
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Indicates the path in the provided S3 output location where Select results will
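The RestoreObject doc above walks through initiating a restore with a duration and a retrieval tier. A hedged sketch of such a request, with a placeholder bucket and key and an illustrative 10-day temporary copy; the pointer-valued Days field (aws.Int32) matches recent SDK versions.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Ask for a temporary copy of an archived object, kept for 10 days,
    // using the Standard retrieval tier described above.
    _, err = client.RestoreObject(ctx, &s3.RestoreObjectInput{
        Bucket: aws.String("example-archive-bucket"),
        Key:    aws.String("archive/example-object.txt"),
        RestoreRequest: &types.RestoreRequest{
            Days: aws.Int32(10),
            GlacierJobParameters: &types.GlacierJobParameters{
                Tier: types.TierStandard,
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}

The restoration status can then be polled with a HeadObject call, which returns the x-amz-restore header mentioned above.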
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go
index f69db696ac..584aa16ab5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go
@@ -14,67 +14,94 @@ import (
"sync"
)
-// This operation is not supported by directory buckets. This action filters the
-// contents of an Amazon S3 object based on a simple structured query language
-// (SQL) statement. In the request, along with the SQL expression, you must also
-// specify a data serialization format (JSON, CSV, or Apache Parquet) of the
-// object. Amazon S3 uses this format to parse object data into records, and
-// returns only records that match the specified SQL expression. You must also
-// specify the data serialization format for the response. This functionality is
-// not supported for Amazon S3 on Outposts. For more information about Amazon S3
-// Select, see Selecting Content from Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html)
-// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html)
-// in the Amazon S3 User Guide. Permissions You must have the s3:GetObject
-// permission for this operation. Amazon S3 Select does not support anonymous
-// access. For more information about permissions, see Specifying Permissions in a
-// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to
-// query objects that have the following format properties:
+// This operation is not supported by directory buckets.
+//
+// This action filters the contents of an Amazon S3 object based on a simple
+// structured query language (SQL) statement. In the request, along with the SQL
+// expression, you must also specify a data serialization format (JSON, CSV, or
+// Apache Parquet) of the object. Amazon S3 uses this format to parse object data
+// into records, and returns only records that match the specified SQL expression.
+// You must also specify the data serialization format for the response.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// For more information about Amazon S3 Select, see [Selecting Content from Objects] and [SELECT Command] in the Amazon S3 User
+// Guide.
+//
+// Permissions You must have the s3:GetObject permission for this operation.
+// Amazon S3 Select does not support anonymous access. For more information about
+// permissions, see [Specifying Permissions in a Policy] in the Amazon S3 User Guide.
+//
+// Object Data Formats You can use Amazon S3 Select to query objects that have the
+// following format properties:
+//
// - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
+//
// - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
+//
// - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2.
// GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports
// for CSV and JSON files. Amazon S3 Select supports columnar compression for
// Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object
// compression for Parquet objects.
+//
// - Server-side encryption - Amazon S3 Select supports querying objects that
-// are protected with server-side encryption. For objects that are encrypted with
-// customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use
-// the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// . For more information about SSE-C, see Server-Side Encryption (Using
-// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
-// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3
-// managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side
-// encryption is handled transparently, so you don't need to specify anything. For
-// more information about server-side encryption, including SSE-S3 and SSE-KMS, see
-// Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
-// in the Amazon S3 User Guide.
+// are protected with server-side encryption.
+//
+// For objects that are encrypted with customer-provided encryption keys (SSE-C),
+// you must use HTTPS, and you must use the headers that are documented in the [GetObject].
+// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+//
+// For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon
+// Web Services KMS keys (SSE-KMS), server-side encryption is handled
+// transparently, so you don't need to specify anything. For more information about
+// server-side encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using Server-Side Encryption] in the Amazon S3
+// User Guide.
//
// Working with the Response Body Given the response size is unknown, Amazon S3
// Select streams the response as a series of messages and includes a
// Transfer-Encoding header with chunked as its value in the response. For more
-// information, see Appendix: SelectObjectContent Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html)
-// . GetObject Support The SelectObjectContent action does not support the
-// following GetObject functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// .
+// information, see [Appendix: SelectObjectContent Response].
+//
+// GetObject Support The SelectObjectContent action does not support the following
+// GetObject functionality. For more information, see [GetObject].
+//
// - Range : Although you can specify a scan range for an Amazon S3 Select
-// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange)
-// in the request parameters), you cannot specify the range of bytes of an object
-// to return.
+// request (see [SelectObjectContentRequest - ScanRange] in the request parameters), you cannot specify the range of
+// bytes of an object to return.
+//
// - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the
// ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING
// storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or
// REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or
// DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For
-// more information about storage classes, see Using Amazon S3 storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html)
-// in the Amazon S3 User Guide.
+// more information about storage classes, see [Using Amazon S3 storage classes] in the Amazon S3 User Guide.
+//
+// Special Errors For a list of special errors for this operation, see [List of SELECT Object Content Error Codes]
//
-// Special Errors For a list of special errors for this operation, see List of
-// SELECT Object Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList)
// The following operations are related to SelectObjectContent :
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
-// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// [GetObject]
+//
+// [GetBucketLifecycleConfiguration]
+//
+// [PutBucketLifecycleConfiguration]
+//
+// [Appendix: SelectObjectContent Response]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html
+// [Selecting Content from Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html
+// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+// [SelectObjectContentRequest - ScanRange]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange
+// [List of SELECT Object Content Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList
+// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+// [Using Amazon S3 storage classes]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html
+// [SELECT Command]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+//
+// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) {
if params == nil {
params = &SelectObjectContentInput{}
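As a rough illustration of the query-and-stream flow described in the doc comment above, a caller might drive SelectObjectContent roughly as follows. This is a minimal sketch, assuming the standard config/client setup; the bucket, key, and SQL expression are placeholders, and the event-stream accessors (GetStream, Events, the Records union member) are assumed from the generated s3 and types packages rather than shown in this hunk.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Run a SQL projection over a CSV object; both serialization formats are required.
	out, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:            aws.String("reports/data.csv"),    // placeholder
		Expression:     aws.String(`SELECT s."Name" FROM S3Object s`),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{
			CSV: &types.CSVOutput{},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The response is an event stream; Records events carry chunks of the result.
	stream := out.GetStream()
	defer stream.Close()
	for event := range stream.Events() {
		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			fmt.Print(string(records.Value.Payload))
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}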
@@ -95,9 +122,9 @@ func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectCo
// expression, you must specify a data serialization format (JSON or CSV) of the
// object. Amazon S3 uses this to parse object data into records. It returns only
// records that match the specified SQL expression. You must also specify the data
-// serialization format for the response. For more information, see S3Select API
-// Documentation (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html)
-// .
+// serialization format for the response. For more information, see [S3Select API Documentation].
+//
+// [S3Select API Documentation]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
type SelectObjectContentInput struct {
// The S3 bucket.
@@ -140,30 +167,37 @@ type SelectObjectContentInput struct {
// The server-side encryption (SSE) algorithm used to encrypt the object. This
// parameter is needed only when the object was created using a checksum algorithm.
- // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide.
+ // For more information, see [Protecting data using SSE-C keys] in the Amazon S3 User Guide.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerAlgorithm *string
// The server-side encryption (SSE) customer managed key. This parameter is needed
// only when the object was created using a checksum algorithm. For more
- // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide.
+ // information, see [Protecting data using SSE-C keys] in the Amazon S3 User Guide.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerKey *string
// The MD5 server-side encryption (SSE) customer managed key. This parameter is
// needed only when the object was created using a checksum algorithm. For more
- // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
- // in the Amazon S3 User Guide.
+ // information, see [Protecting data using SSE-C keys] in the Amazon S3 User Guide.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
SSECustomerKeyMD5 *string
// Specifies the byte range of the object to get the records from. A record is
// processed when its first byte is contained by the range. This parameter is
// optional, but when specified, it must not be empty. See RFC 2616, Section
- // 14.35.1 about how to specify the start and end of the range. ScanRange may be
- // used in the following ways:
+ // 14.35.1 about how to specify the start and end of the range.
+ //
+ // ScanRange may be used in the following ways:
+ //
// - Start=50, End=100 - process only the records starting between the bytes 50 and 100
// (inclusive, counting from zero)
+//
// - Start=50 - process only the records starting after the byte 50
+//
// - End=50 - process only the records within the last 50 bytes of the file.
ScanRange *types.ScanRange
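The ScanRange forms listed above map onto the Start and End members of types.ScanRange. A small sketch, reusing the aws and types imports from the example above:

// scanRangeExamples returns the three documented shapes; the byte values match
// the examples in the field comment above.
func scanRangeExamples() []*types.ScanRange {
	return []*types.ScanRange{
		{Start: aws.Int64(50), End: aws.Int64(100)}, // records starting between bytes 50 and 100 (inclusive)
		{Start: aws.Int64(50)},                      // records starting after byte 50
		{End: aws.Int64(50)},                        // records within the last 50 bytes of the object
	}
}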
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go
index ff73197949..e78791a865 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go
@@ -16,38 +16,48 @@ import (
"io"
)
-// Uploads a part in a multipart upload. In this operation, you provide new data
-// as a part of an object in your request. However, you have an option to specify
-// your existing Amazon S3 object as a data source for the part you are uploading.
-// To upload a part from an existing object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
-// operation. You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// ) before you can upload any part. In response to your initiate request, Amazon
-// S3 returns an upload ID, a unique identifier that you must include in your
-// upload part request. Part numbers can be any number from 1 to 10,000, inclusive.
-// A part number uniquely identifies a part and also defines its position within
-// the object being created. If you upload a new part using the same part number
-// that was used with a previous part, the previously uploaded part is overwritten.
+// Uploads a part in a multipart upload.
+//
+// In this operation, you provide new data as a part of an object in your request.
+// However, you have an option to specify your existing Amazon S3 object as a data
+// source for the part you are uploading. To upload a part from an existing object,
+// you use the [UploadPartCopy] operation.
+//
+// You must initiate a multipart upload (see [CreateMultipartUpload]) before you can upload any part. In
+// response to your initiate request, Amazon S3 returns an upload ID, a unique
+// identifier that you must include in your upload part request.
+//
+// Part numbers can be any number from 1 to 10,000, inclusive. A part number
+// uniquely identifies a part and also defines its position within the object being
+// created. If you upload a new part using the same part number that was used with
+// a previous part, the previously uploaded part is overwritten.
+//
// For information about maximum and minimum part sizes and other multipart upload
-// specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
-// in the Amazon S3 User Guide. After you initiate multipart upload and upload one
-// or more parts, you must either complete or abort multipart upload in order to
-// stop getting charged for storage of the uploaded parts. Only after you either
-// complete or abort multipart upload, Amazon S3 frees up the parts storage and
-// stops charging you for the parts storage. For more information on multipart
-// uploads, go to Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html)
-// in the Amazon S3 User Guide . Directory buckets - For directory buckets, you
-// must make requests for this API operation to the Zonal endpoint. These endpoints
-// support virtual-hosted-style requests in the format
+// specifications, see [Multipart upload limits] in the Amazon S3 User Guide.
+//
+// After you initiate multipart upload and upload one or more parts, you must
+// either complete or abort multipart upload in order to stop getting charged for
+// storage of the uploaded parts. Only after you either complete or abort multipart
+// upload, Amazon S3 frees up the parts storage and stops charging you for the
+// parts storage.
+//
+// For more information on multipart uploads, go to [Multipart Upload Overview] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Permissions
+// requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+// Guide.
+//
+// Permissions
+//
// - General purpose bucket permissions - For information on the permissions
-// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide.
+// required to use the multipart upload API, see [Multipart Upload and Permissions] in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -55,19 +65,19 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession].
//
-// Data integrity General purpose bucket - To ensure that data is not corrupted
+// Data integrity General purpose bucket - To ensure that data is not corrupted
// traversing the network, specify the Content-MD5 header in the upload part
// request. Amazon S3 checks the part data against the provided MD5 value. If they
// do not match, Amazon S3 returns an error. If the upload request is signed with
// Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256
-// header as a checksum instead of Content-MD5 . For more information see
-// Authenticating Requests: Using the Authorization Header (Amazon Web Services
-// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html)
-// . Directory buckets - MD5 is not supported by directory buckets. You can use
-// checksum algorithms to check object integrity. Encryption
+// header as a checksum instead of Content-MD5 . For more information, see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)].
+//
+// Directory buckets - MD5 is not supported by directory buckets. You can use
+// checksum algorithms to check object integrity.
+//
+// Encryption
// - General purpose bucket - Server-side encryption is for data encryption at
// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers
// and decrypts it when you access it. You have mutually exclusive options to
@@ -78,37 +88,70 @@ import (
// encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally
// tell Amazon S3 to encrypt data at rest using server-side encryption with other
// key options. The option you use depends on whether you want to use KMS keys
-// (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is
-// supported by the S3 Multipart Upload operations. Unless you are using a
-// customer-provided encryption key (SSE-C), you don't need to specify the
-// encryption parameters in each UploadPart request. Instead, you only need to
-// specify the server-side encryption parameters in the initial Initiate Multipart
-// request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// . If you request server-side encryption using a customer-provided encryption key
-// (SSE-C) in your initiate multipart upload request, you must provide identical
-// encryption information in each part upload using the following request headers.
-// - x-amz-server-side-encryption-customer-algorithm
-// - x-amz-server-side-encryption-customer-key
-// - x-amz-server-side-encryption-customer-key-MD5
-// - Directory bucket - For directory buckets, only server-side encryption with
-// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
-//
-// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
-// in the Amazon S3 User Guide. Special errors
+// (SSE-KMS) or provide your own encryption key (SSE-C).
+//
+// Server-side encryption is supported by the S3 Multipart Upload operations.
+//
+// Unless you are using a customer-provided encryption key (SSE-C), you don't need
+// to specify the encryption parameters in each UploadPart request. Instead, you
+// only need to specify the server-side encryption parameters in the initial
+// Initiate Multipart request. For more information, see [CreateMultipartUpload].
+//
+// If you request server-side encryption using a customer-provided encryption key
+// (SSE-C) in your initiate multipart upload request, you must provide identical
+// encryption information in each part upload using the following request headers.
+//
+// - x-amz-server-side-encryption-customer-algorithm
+//
+// - x-amz-server-side-encryption-customer-key
+//
+// - x-amz-server-side-encryption-customer-key-MD5
+//
+// - Directory bucket - For directory buckets, only server-side encryption with
+// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+//
+// For more information, see [Using Server-Side Encryption] in the Amazon S3 User Guide.
+//
+// Special errors
+//
// - Error Code: NoSuchUpload
+//
// - Description: The specified multipart upload does not exist. The upload ID
// might be invalid, or the multipart upload might have been aborted or completed.
+//
// - HTTP Status Code: 404 Not Found
+//
// - SOAP Fault Code Prefix: Client
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to UploadPart :
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following operations are related to UploadPart :
+//
+// [CreateMultipartUpload]
+//
+// [CompleteMultipartUpload]
+//
+// [AbortMultipartUpload]
+//
+// [ListParts]
+//
+// [ListMultipartUploads]
+//
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+// [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
+// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) {
if params == nil {
params = &UploadPartInput{}
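To make the initiate, upload, and complete-or-abort sequence described above concrete, here is a hedged sketch. Names are placeholders; part sizing, retries, and checksum handling are omitted; it assumes the same imports as the earlier examples plus bytes.

// uploadInParts initiates a multipart upload, uploads numbered parts, and then
// completes the upload, aborting on failure so the parts stop accruing charges.
func uploadInParts(ctx context.Context, client *s3.Client, bucket, key string, parts [][]byte) error {
	create, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}

	var completed []types.CompletedPart
	for i, data := range parts {
		partNumber := int32(i + 1) // part numbers run from 1 to 10,000
		resp, err := client.UploadPart(ctx, &s3.UploadPartInput{
			Bucket:     aws.String(bucket),
			Key:        aws.String(key),
			UploadId:   create.UploadId,
			PartNumber: aws.Int32(partNumber),
			Body:       bytes.NewReader(data),
		})
		if err != nil {
			// Abort so the already-uploaded parts are freed.
			_, _ = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
				Bucket: aws.String(bucket), Key: aws.String(key), UploadId: create.UploadId,
			})
			return err
		}
		completed = append(completed, types.CompletedPart{
			ETag:       resp.ETag,
			PartNumber: aws.Int32(partNumber),
		})
	}

	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        create.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{Parts: completed},
	})
	return err
}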
@@ -126,31 +169,39 @@ func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns
type UploadPartInput struct {
- // The name of the bucket to which the multipart upload was initiated. Directory
- // buckets - When you use this operation with a directory bucket, you must use
- // virtual-hosted-style requests in the format
+ // The name of the bucket to which the multipart upload was initiated.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
@@ -178,40 +229,48 @@ type UploadPartInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must
- // be the same for all parts and it match the checksum value supplied in the
- // CreateMultipartUpload request.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // This checksum algorithm must be the same for all parts and it must match the
+ // checksum value supplied in the CreateMultipartUpload request.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32C *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA1 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA256 *string
// Size of the body in bytes. This parameter is useful when the size of the body
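For the checksum fields above, a caller might supply a precomputed digest along the following lines. This sketch assumes crypto/sha256 and encoding/base64 in addition to the earlier imports; if the multipart upload was created with a checksum algorithm, the algorithm chosen here has to match it.

// uploadPartWithSHA256 sends one part with a caller-computed SHA-256 checksum.
func uploadPartWithSHA256(ctx context.Context, client *s3.Client, in *s3.UploadPartInput, data []byte) (*s3.UploadPartOutput, error) {
	// Base64-encoded SHA-256 of the part body; S3 recomputes it and rejects the
	// part on mismatch. Supplying the value directly means any ChecksumAlgorithm
	// setting is ignored, per the field documentation above.
	sum := sha256.Sum256(data)
	in.Body = bytes.NewReader(data)
	in.ChecksumSHA256 = aws.String(base64.StdEncoding.EncodeToString(sum[:]))
	return client.UploadPart(ctx, in)
}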
@@ -220,8 +279,9 @@ type UploadPartInput struct {
// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
// auto-populated when using the command from the CLI. This parameter is required
- // if object lock parameters are specified. This functionality is not supported for
- // directory buckets.
+ // if object lock parameters are specified.
+ //
+ // This functionality is not supported for directory buckets.
ContentMD5 *string
// The account ID of the expected bucket owner. If the account ID that you provide
@@ -233,14 +293,17 @@ type UploadPartInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Specifies the algorithm to use when encrypting the object (for example,
- // AES256). This functionality is not supported for directory buckets.
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
@@ -248,14 +311,16 @@ type UploadPartInput struct {
// discarded; Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
// x-amz-server-side-encryption-customer-algorithm header . This must be the same
- // encryption key specified in the initiate multipart upload request. This
- // functionality is not supported for directory buckets.
+ // encryption key specified in the initiate multipart upload request.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error. This functionality is not
- // supported for directory buckets.
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
noSmithyDocumentSerde
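The three SSE-C fields above carry the x-amz-server-side-encryption-customer-* headers, which must be repeated on every part of an SSE-C upload. The sketch below uses crypto/md5 and encoding/base64 and assumes the fields take the header-style values directly, that is, a base64-encoded key and the base64-encoded MD5 of the raw key; whether this SDK performs any encoding on the caller's behalf is not shown in this diff, so treat that as an assumption.

// withSSEC fills in the SSE-C fields for one part using a hypothetical 256-bit key.
func withSSEC(in *s3.UploadPartInput, rawKey []byte) *s3.UploadPartInput {
	// Assumed header-style values: base64 key plus base64 MD5 of the raw key.
	keyMD5 := md5.Sum(rawKey)
	in.SSECustomerAlgorithm = aws.String("AES256")
	in.SSECustomerKey = aws.String(base64.StdEncoding.EncodeToString(rawKey))
	in.SSECustomerKeyMD5 = aws.String(base64.StdEncoding.EncodeToString(keyMD5[:]))
	return in
}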
@@ -270,8 +335,9 @@ func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) {
type UploadPartOutput struct {
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
- // is not supported for directory buckets.
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
@@ -279,8 +345,10 @@ type UploadPartOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
@@ -288,8 +356,10 @@ type UploadPartOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
@@ -297,8 +367,10 @@ type UploadPartOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
@@ -306,37 +378,46 @@ type UploadPartOutput struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA256 *string
// Entity tag for the uploaded object.
ETag *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to confirm the encryption
- // algorithm that's used. This functionality is not supported for directory
- // buckets.
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide the round-trip
- // message integrity verification of the customer-provided encryption key. This
- // functionality is not supported for directory buckets.
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, indicates the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. This functionality
- // is not supported for directory buckets.
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when you store this object in Amazon
- // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side
- // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+ // S3 (for example, AES256 , aws:kms ).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
index d42dc60cd3..23fb6ef516 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
@@ -18,90 +18,134 @@ import (
// Uploads a part by copying data from an existing object as data source. To
// specify the data source, you add the request header x-amz-copy-source in your
// request. To specify a byte range, you add the request header
-// x-amz-copy-source-range in your request. For information about maximum and
-// minimum part sizes and other multipart upload specifications, see Multipart
-// upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
-// in the Amazon S3 User Guide. Instead of copying data from an existing object as
-// part data, you might use the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// action to upload new data as a part of an object in your request. You must
-// initiate a multipart upload before you can upload any part. In response to your
-// initiate request, Amazon S3 returns the upload ID, a unique identifier that you
-// must include in your upload part request. For conceptual information about
-// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
-// in the Amazon S3 User Guide. For information about copying objects using a
-// single atomic action vs. a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
-// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must
-// make requests for this API operation to the Zonal endpoint. These endpoints
-// support virtual-hosted-style requests in the format
+// x-amz-copy-source-range in your request.
+//
+// For information about maximum and minimum part sizes and other multipart upload
+// specifications, see [Multipart upload limits] in the Amazon S3 User Guide.
+//
+// Instead of copying data from an existing object as part data, you might use the [UploadPart]
+// action to upload new data as a part of an object in your request.
+//
+// You must initiate a multipart upload before you can upload any part. In
+// response to your initiate request, Amazon S3 returns the upload ID, a unique
+// identifier that you must include in your upload part request.
+//
+// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User
+// Guide. For information about copying objects using a single atomic action vs. a
+// multipart upload, see [Operations on Objects] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
-// requests are not supported. For more information, see Regional and Zonal
-// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
-// in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy
-// requests must be authenticated and signed by using IAM credentials (access key
-// ID and secret access key for the IAM identities). All headers with the x-amz-
-// prefix, including x-amz-copy-source , must be signed. For more information, see
-// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
-// . Directory buckets - You must use IAM credentials to authenticate and authorize
+// requests are not supported. For more information, see [Regional and Zonal endpoints] in the Amazon S3 User
+// Guide.
+//
+// Authentication and authorization All UploadPartCopy requests must be
+// authenticated and signed by using IAM credentials (access key ID and secret
+// access key for the IAM identities). All headers with the x-amz- prefix,
+// including x-amz-copy-source , must be signed. For more information, see [REST Authentication].
+//
+// Directory buckets - You must use IAM credentials to authenticate and authorize
// your access to the UploadPartCopy API operation, instead of using the temporary
-// security credentials through the CreateSession API operation. Amazon Web
-// Services CLI or SDKs handles authentication and authorization on your behalf.
+// security credentials through the CreateSession API operation.
+//
+// Amazon Web Services CLI or SDKs handle authentication and authorization on
+// your behalf.
+//
// Permissions You must have READ access to the source object and WRITE access to
// the destination bucket.
+//
// - General purpose bucket permissions - You must have the permissions in a
// policy based on the bucket types of your source bucket and destination bucket in
// an UploadPartCopy operation.
+//
// - If the source object is in a general purpose bucket, you must have the
// s3:GetObject permission to read the source object that is being copied.
+//
// - If the destination bucket is a general purpose bucket, you must have the
// s3:PutObject permission to write the object copy to the destination bucket.
-// For information about permissions required to use the multipart upload API, see
-// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide.
-// - Directory bucket permissions - You must have permissions in a bucket policy
-// or an IAM identity-based policy based on the source and destination bucket types
-// in an UploadPartCopy operation.
-// - If the source object that you want to copy is in a directory bucket, you
-// must have the s3express:CreateSession permission in the Action element of a
-// policy to read the object . By default, the session is in the ReadWrite mode.
-// If you want to restrict the access, you can explicitly set the
-// s3express:SessionMode condition key to ReadOnly on the copy source bucket.
-// - If the copy destination is a directory bucket, you must have the
-// s3express:CreateSession permission in the Action element of a policy to write
-// the object to the destination. The s3express:SessionMode condition key cannot
-// be set to ReadOnly on the copy destination. For example policies, see Example
-// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
-// and Amazon Web Services Identity and Access Management (IAM) identity-based
-// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
-// in the Amazon S3 User Guide.
+//
+// For information about permissions required to use the multipart upload API, see [Multipart Upload and Permissions]
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - You must have permissions in a bucket policy
+// or an IAM identity-based policy based on the source and destination bucket types
+// in an UploadPartCopy operation.
+//
+// - If the source object that you want to copy is in a directory bucket, you
+// must have the s3express:CreateSession permission in the Action element of a
+// policy to read the object . By default, the session is in the ReadWrite mode.
+// If you want to restrict the access, you can explicitly set the
+// s3express:SessionMode condition key to ReadOnly on the copy source bucket.
+//
+// - If the copy destination is a directory bucket, you must have the
+// s3express:CreateSession permission in the Action element of a policy to write
+// the object to the destination. The s3express:SessionMode condition key cannot
+// be set to ReadOnly on the copy destination.
+//
+// For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3 User Guide.
//
// Encryption
-// - General purpose buckets - For information about using server-side
-// encryption with customer-provided encryption keys with the UploadPartCopy
-// operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
-// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// .
-// - Directory buckets - For directory buckets, only server-side encryption with
-// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+// - General purpose buckets - For information about using server-side
+// encryption with customer-provided encryption keys with the UploadPartCopy
+// operation, see [CopyObject] and [UploadPart].
+//
+// - Directory buckets - For directory buckets, only server-side encryption with
+// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
//
// Special errors
+//
// - Error Code: NoSuchUpload
+//
// - Description: The specified multipart upload does not exist. The upload ID
// might be invalid, or the multipart upload might have been aborted or completed.
+//
// - HTTP Status Code: 404 Not Found
+//
// - Error Code: InvalidRequest
+//
// - Description: The specified copy source is not supported as a byte-range
// copy source.
+//
// - HTTP Status Code: 400 Bad Request
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to UploadPartCopy :
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
+//
+// The following operations are related to UploadPartCopy :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [AbortMultipartUpload]
+//
+// [ListParts]
+//
+// [ListMultipartUploads]
+//
+// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html
+// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Operations on Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+//
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) {
if params == nil {
params = &UploadPartCopyInput{}
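As an illustration of the copy-source mechanics described above, a caller might copy one part's worth of bytes from an existing object roughly like this. A hedged sketch; the bucket, key, and byte range are placeholders, and it reuses the imports from the earlier examples.

// copyPartRange uploads one part whose data comes from an existing object via
// x-amz-copy-source and x-amz-copy-source-range instead of the request body.
func copyPartRange(ctx context.Context, client *s3.Client, destBucket, destKey, uploadID string, partNumber int32) (*s3.UploadPartCopyOutput, error) {
	return client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:     aws.String(destBucket),
		Key:        aws.String(destKey),
		UploadId:   aws.String(uploadID),
		PartNumber: aws.Int32(partNumber),
		// x-amz-copy-source: URL-encode keys that contain special characters.
		CopySource: aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
		// x-amz-copy-source-range: the first 5 MiB of the source object.
		CopySourceRange: aws.String("bytes=0-5242879"),
	})
}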
@@ -119,43 +163,53 @@ func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput
type UploadPartCopyInput struct {
- // The bucket name. Directory buckets - When you use this operation with a
- // directory bucket, you must use virtual-hosted-style requests in the format
+ // The bucket name.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
- // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
- // in the Amazon S3 User Guide. Access points - When you use this action with an
- // access point, you must provide the alias of the access point in place of the
- // bucket name or specify the access point ARN. When using the access point ARN,
- // you must direct requests to the access point hostname. The access point hostname
- // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide. Access points and Object Lambda access points are
- // not supported by directory buckets. S3 on Outposts - When you use this action
- // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
- // hostname. The S3 on Outposts hostname takes the form
+ // naming restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must
+ // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // information about S3 on Outposts ARNs, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
// Specifies the source object for the copy operation. You specify the value in
// one of two formats, depending on whether you want to access the source object
- // through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html)
- // :
+ // through an [access point]:
+ //
// - For objects not accessed through an access point, specify the name of the
// source bucket and key of the source object, separated by a slash (/). For
// example, to copy the object reports/january.pdf from the bucket
// awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must
// be URL-encoded.
+ //
// - For objects accessed through access points, specify the Amazon Resource
// Name (ARN) of the object as accessed through the access point, in the format
// arn:aws:s3:::accesspoint//object/ . For example, to copy the object
@@ -163,28 +217,39 @@ type UploadPartCopyInput struct {
// 123456789012 in Region us-west-2 , use the URL encoding of
// arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
// . The value must be URL encoded.
+ //
// - Amazon S3 supports copy operations using Access points only when the source
// and destination buckets are in the same Amazon Web Services Region.
- // - Access points are not supported by directory buckets. Alternatively, for
- // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as
- // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example,
- // to copy the object reports/january.pdf through outpost my-outpost owned by
- // account 123456789012 in Region us-west-2 , use the URL encoding of
+ //
+ // - Access points are not supported by directory buckets.
+ //
+ // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the
+ // ARN of the object as accessed in the format
+ // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object
+ // reports/january.pdf through outpost my-outpost owned by account 123456789012
+ // in Region us-west-2 , use the URL encoding of
// arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
// . The value must be URL-encoded.
+ //
// If your bucket has versioning enabled, you could have multiple versions of the
// same object. By default, x-amz-copy-source identifies the current version of
// the source object to copy. To copy a specific version of the source object to
// copy, append ?versionId= to the x-amz-copy-source request header (for example,
// x-amz-copy-source:
// /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
- // ). If the current version is a delete marker and you don't specify a versionId
- // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found
- // error, because the object does not exist. If you specify versionId in the
+ // ).
+ //
+ // If the current version is a delete marker and you don't specify a versionId in
+ // the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error,
+ // because the object does not exist. If you specify versionId in the
// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an
// HTTP 400 Bad Request error, because you are not allowed to specify a delete
- // marker as a version for the x-amz-copy-source . Directory buckets - S3
- // Versioning isn't enabled and supported for directory buckets.
+ // marker as a version for the x-amz-copy-source .
+ //
+	// Directory buckets - S3 Versioning isn't enabled or supported for directory
+ // buckets.
+ //
+ // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
//
// This member is required.
CopySource *string
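A quick, hedged illustration of the x-amz-copy-source formats described above, reusing only the example values already quoted in the comment (they are placeholders, not values introduced by this change):

package main

import "fmt"

func main() {
	// Plain bucket/key form; copies the current version of the source object.
	plain := "awsexamplebucket/reports/january.pdf"

	// Same object pinned to a specific version via the versionId suffix.
	versioned := plain + "?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"

	// Access point form: the URL-encoded access point ARN followed by /object/<key>.
	viaAccessPoint := "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf"

	fmt.Println(plain, versioned, viaAccessPoint)
}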
@@ -205,34 +270,56 @@ type UploadPartCopyInput struct {
// This member is required.
UploadId *string
- // Copies the object if its entity tag (ETag) matches the specified tag. If both
- // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
- // headers are present in the request as follows: x-amz-copy-source-if-match
- // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since
- // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data.
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ //
+ // If both of the x-amz-copy-source-if-match and
+ // x-amz-copy-source-if-unmodified-since headers are present in the request as
+ // follows:
+ //
+ // x-amz-copy-source-if-match condition evaluates to true , and;
+ //
+ // x-amz-copy-source-if-unmodified-since condition evaluates to false ;
+ //
+ // Amazon S3 returns 200 OK and copies the data.
CopySourceIfMatch *string
- // Copies the object if it has been modified since the specified time. If both of
- // the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
- // headers are present in the request as follows: x-amz-copy-source-if-none-match
- // condition evaluates to false , and; x-amz-copy-source-if-modified-since
- // condition evaluates to true ; Amazon S3 returns 412 Precondition Failed
- // response code.
+ // Copies the object if it has been modified since the specified time.
+ //
+ // If both of the x-amz-copy-source-if-none-match and
+ // x-amz-copy-source-if-modified-since headers are present in the request as
+ // follows:
+ //
+ // x-amz-copy-source-if-none-match condition evaluates to false , and;
+ //
+ // x-amz-copy-source-if-modified-since condition evaluates to true ;
+ //
+ // Amazon S3 returns 412 Precondition Failed response code.
CopySourceIfModifiedSince *time.Time
- // Copies the object if its entity tag (ETag) is different than the specified
- // ETag. If both of the x-amz-copy-source-if-none-match and
+ // Copies the object if its entity tag (ETag) is different than the specified ETag.
+ //
+ // If both of the x-amz-copy-source-if-none-match and
// x-amz-copy-source-if-modified-since headers are present in the request as
- // follows: x-amz-copy-source-if-none-match condition evaluates to false , and;
- // x-amz-copy-source-if-modified-since condition evaluates to true ; Amazon S3
- // returns 412 Precondition Failed response code.
+ // follows:
+ //
+ // x-amz-copy-source-if-none-match condition evaluates to false , and;
+ //
+ // x-amz-copy-source-if-modified-since condition evaluates to true ;
+ //
+ // Amazon S3 returns 412 Precondition Failed response code.
CopySourceIfNoneMatch *string
- // Copies the object if it hasn't been modified since the specified time. If both
- // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
- // headers are present in the request as follows: x-amz-copy-source-if-match
- // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since
- // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data.
+ // Copies the object if it hasn't been modified since the specified time.
+ //
+ // If both of the x-amz-copy-source-if-match and
+ // x-amz-copy-source-if-unmodified-since headers are present in the request as
+ // follows:
+ //
+ // x-amz-copy-source-if-match condition evaluates to true , and;
+ //
+ // x-amz-copy-source-if-unmodified-since condition evaluates to false ;
+ //
+ // Amazon S3 returns 200 OK and copies the data.
CopySourceIfUnmodifiedSince *time.Time
// The range of bytes to copy from the source object. The range value must use the
@@ -243,20 +330,26 @@ type UploadPartCopyInput struct {
CopySourceRange *string
// Specifies the algorithm to use when decrypting the source object (for example,
- // AES256 ). This functionality is not supported when the source object is in a
- // directory bucket.
+ // AES256 ).
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
// the source object. The encryption key provided in this header must be one that
- // was used when the source object was created. This functionality is not supported
- // when the source object is in a directory bucket.
+ // was used when the source object was created.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error. This functionality is not
- // supported when the source object is in a directory bucket.
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerKeyMD5 *string
// The account ID of the expected destination bucket owner. If the account ID that
@@ -273,15 +366,18 @@ type UploadPartCopyInput struct {
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+	// downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
- // Specifies the algorithm to use when encrypting the object (for example,
- // AES256). This functionality is not supported when the destination bucket is a
- // directory bucket.
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
@@ -289,15 +385,18 @@ type UploadPartCopyInput struct {
// discarded; Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
// x-amz-server-side-encryption-customer-algorithm header. This must be the same
- // encryption key specified in the initiate multipart upload request. This
- // functionality is not supported when the destination bucket is a directory
+ // encryption key specified in the initiate multipart upload request.
+ //
+ // This functionality is not supported when the destination bucket is a directory
// bucket.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error. This functionality is not
- // supported when the destination bucket is a directory bucket.
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
SSECustomerKeyMD5 *string
noSmithyDocumentSerde
@@ -311,42 +410,52 @@ func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) {
type UploadPartCopyOutput struct {
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
- // is not supported for directory buckets.
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool
// Container for all response elements.
CopyPartResult *types.CopyPartResult
// The version of the source object that was copied, if you have enabled
- // versioning on the source bucket. This functionality is not supported when the
- // source object is in a directory bucket.
+ // versioning on the source bucket.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceVersionId *string
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to confirm the encryption
- // algorithm that's used. This functionality is not supported for directory
- // buckets.
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide the round-trip
- // message integrity verification of the customer-provided encryption key. This
- // functionality is not supported for directory buckets.
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, indicates the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. This functionality
- // is not supported for directory buckets.
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when you store this object in Amazon
- // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side
- // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+ // S3 (for example, AES256 , aws:kms ).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed keys
+ // (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// Metadata pertaining to the operation's result.
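Tying the input and output fields above together, a minimal call sketch follows. The bucket, key, upload ID, ETag, and byte range are hypothetical, and the field shapes assume the v1.54 API surface; treat this as an illustration rather than prescribed usage.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Copy the first 5 MiB as part 1, but only if the source object still has
	// the expected ETag (see CopySourceIfMatch above).
	out, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:            aws.String("amzn-example-destination-bucket"),
		Key:               aws.String("large-object"),
		UploadId:          aws.String("exampleUploadId"),
		PartNumber:        aws.Int32(1),
		CopySource:        aws.String("awsexamplebucket/reports/january.pdf"),
		CopySourceIfMatch: aws.String(`"exampleETag"`),
		CopySourceRange:   aws.String("bytes=0-5242879"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// CopyPartResult carries the part ETag needed later by CompleteMultipartUpload.
	fmt.Println(aws.ToString(out.CopyPartResult.ETag))
}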
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
index e181ab7111..4dfa64e8dd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
@@ -18,42 +18,54 @@ import (
"time"
)
-// This operation is not supported by directory buckets. Passes transformed
-// objects to a GetObject operation when using Object Lambda access points. For
-// information about Object Lambda access points, see Transforming objects with
-// Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
-// in the Amazon S3 User Guide. This operation supports metadata that can be
-// returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-// , in addition to RequestRoute , RequestToken , StatusCode , ErrorCode , and
-// ErrorMessage . The GetObject response metadata is supported so that the
-// WriteGetObjectResponse caller, typically an Lambda function, can provide the
-// same metadata when it internally invokes GetObject . When WriteGetObjectResponse
-// is called by a customer-owned Lambda function, the metadata returned to the end
-// user GetObject call might differ from what Amazon S3 would normally return. You
-// can include any number of metadata headers. When including a metadata header, it
-// should be prefaced with x-amz-meta . For example, x-amz-meta-my-custom-header:
-// MyCustomValue . The primary use case for this is to forward GetObject metadata.
+// This operation is not supported by directory buckets.
+//
+// Passes transformed objects to a GetObject operation when using Object Lambda
+// access points. For information about Object Lambda access points, see [Transforming objects with Object Lambda access points] in the
+// Amazon S3 User Guide.
+//
+// This operation supports metadata that can be returned by [GetObject], in addition to
+// RequestRoute , RequestToken , StatusCode , ErrorCode , and ErrorMessage . The
+// GetObject response metadata is supported so that the WriteGetObjectResponse
+// caller, typically a Lambda function, can provide the same metadata when it
+// internally invokes GetObject . When WriteGetObjectResponse is called by a
+// customer-owned Lambda function, the metadata returned to the end user GetObject
+// call might differ from what Amazon S3 would normally return.
+//
+// You can include any number of metadata headers. When including a metadata
+// header, it should be prefaced with x-amz-meta . For example,
+// x-amz-meta-my-custom-header: MyCustomValue . The primary use case for this is to
+// forward GetObject metadata.
+//
// Amazon Web Services provides some prebuilt Lambda functions that you can use
// with S3 Object Lambda to detect and redact personally identifiable information
// (PII) and decompress S3 objects. These Lambda functions are available in the
// Amazon Web Services Serverless Application Repository, and can be selected
// through the Amazon Web Services Management Console when you create your Object
-// Lambda access point. Example 1: PII Access Control - This Lambda function uses
-// Amazon Comprehend, a natural language processing (NLP) service using machine
-// learning to find insights and relationships in text. It automatically detects
-// personally identifiable information (PII) such as names, addresses, dates,
-// credit card numbers, and social security numbers from documents in your Amazon
-// S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon
-// Comprehend, a natural language processing (NLP) service using machine learning
-// to find insights and relationships in text. It automatically redacts personally
+// Lambda access point.
+//
+// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a
+// natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically detects personally
// identifiable information (PII) such as names, addresses, dates, credit card
// numbers, and social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a
+// natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically redacts personally
+// identifiable information (PII) such as names, addresses, dates, credit card
+// numbers, and social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is
// equipped to decompress objects stored in S3 in one of six compressed file
-// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. For information
-// on how to view and use these functions, see Using Amazon Web Services built
-// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html)
-// in the Amazon S3 User Guide.
+// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP.
+//
+// For information on how to view and use these functions, see [Using Amazon Web Services built Lambda functions] in the Amazon S3
+// User Guide.
+//
+// [Transforming objects with Object Lambda access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html
+// [Using Amazon Web Services built Lambda functions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) {
if params == nil {
params = &WriteGetObjectResponseInput{}
@@ -88,7 +100,7 @@ type WriteGetObjectResponseInput struct {
// The object data.
Body io.Reader
- // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
+ // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
// server-side encryption with Amazon Web Services KMS (SSE-KMS).
BucketKeyEnabled *bool
@@ -101,9 +113,12 @@ type WriteGetObjectResponseInput struct {
// Lambda function. This may not match the checksum for the object stored in Amazon
// S3. Amazon S3 will perform validation of the checksum values only when the
// original GetObject request required checksum validation. For more information
- // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. Only one checksum header can be specified at a
- // time. If you supply multiple checksum headers, this request will fail.
+	// about checksums, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // Only one checksum header can be specified at a time. If you supply multiple
+ // checksum headers, this request will fail.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32 *string
// This header can be used as a data integrity check to verify that the data
@@ -112,9 +127,12 @@ type WriteGetObjectResponseInput struct {
// Lambda function. This may not match the checksum for the object stored in Amazon
// S3. Amazon S3 will perform validation of the checksum values only when the
// original GetObject request required checksum validation. For more information
- // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. Only one checksum header can be specified at a
- // time. If you supply multiple checksum headers, this request will fail.
+	// about checksums, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // Only one checksum header can be specified at a time. If you supply multiple
+ // checksum headers, this request will fail.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32C *string
// This header can be used as a data integrity check to verify that the data
@@ -123,9 +141,12 @@ type WriteGetObjectResponseInput struct {
// function. This may not match the checksum for the object stored in Amazon S3.
// Amazon S3 will perform validation of the checksum values only when the original
// GetObject request required checksum validation. For more information about
- // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. Only one checksum header can be specified at a
- // time. If you supply multiple checksum headers, this request will fail.
+	// checksums, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // Only one checksum header can be specified at a time. If you supply multiple
+ // checksum headers, this request will fail.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA1 *string
// This header can be used as a data integrity check to verify that the data
@@ -134,9 +155,12 @@ type WriteGetObjectResponseInput struct {
// Lambda function. This may not match the checksum for the object stored in Amazon
// S3. Amazon S3 will perform validation of the checksum values only when the
// original GetObject request required checksum validation. For more information
- // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. Only one checksum header can be specified at a
- // time. If you supply multiple checksum headers, this request will fail.
+	// about checksums, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // Only one checksum header can be specified at a time. If you supply multiple
+ // checksum headers, this request will fail.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA256 *string
// Specifies presentational information for the object.
@@ -205,8 +229,9 @@ type WriteGetObjectResponseInput struct {
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// Indicates whether an object stored in Amazon S3 has Object Lock enabled. For
- // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html)
- // .
+ // more information about S3 Object Lock, see [Object Lock].
+ //
+ // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html
ObjectLockMode types.ObjectLockMode
// The date and time when Object Lock is configured to expire.
@@ -216,12 +241,15 @@ type WriteGetObjectResponseInput struct {
PartsCount *int32
// Indicates if request involves bucket that is either a source or destination in
- // a Replication rule. For more information about S3 Replication, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html)
- // .
+ // a Replication rule. For more information about S3 Replication, see [Replication].
+ //
+ // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html
ReplicationStatus types.ReplicationStatus
// If present, indicates that the requester was successfully charged for the
- // request. This functionality is not supported for directory buckets.
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Provides information about object restoration operation and expiration time of
@@ -232,43 +260,59 @@ type WriteGetObjectResponseInput struct {
// encryption key was specified for object stored in Amazon S3.
SSECustomerAlgorithm *string
- // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to
- // encrypt data stored in S3. For more information, see Protecting data using
- // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)
- // .
+ // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to
+ // encrypt data stored in S3. For more information, see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)].
+ //
+ // [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
SSECustomerKeyMD5 *string
- // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web
+ // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web
// Services Key Management Service (Amazon Web Services KMS) symmetric encryption
 	// customer managed key that was used for the object stored in Amazon S3.
SSEKMSKeyId *string
- // The server-side encryption algorithm used when storing requested object in
+ // The server-side encryption algorithm used when storing requested object in
// Amazon S3 (for example, AES256, aws:kms ).
ServerSideEncryption types.ServerSideEncryption
// The integer status code for an HTTP response of a corresponding GetObject
// request. The following is a list of status codes.
+ //
// - 200 - OK
+ //
// - 206 - Partial Content
+ //
// - 304 - Not Modified
+ //
// - 400 - Bad Request
+ //
// - 401 - Unauthorized
+ //
// - 403 - Forbidden
+ //
// - 404 - Not Found
+ //
// - 405 - Method Not Allowed
+ //
// - 409 - Conflict
+ //
// - 411 - Length Required
+ //
// - 412 - Precondition Failed
+ //
// - 416 - Range Not Satisfiable
+ //
// - 500 - Internal Server Error
+ //
// - 503 - Service Unavailable
StatusCode *int32
// Provides storage class information of the object. Amazon S3 returns this header
- // for all objects except for S3 Standard storage class objects. For more
- // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
- // .
+ // for all objects except for S3 Standard storage class objects.
+ //
+ // For more information, see [Storage Classes].
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
StorageClass types.StorageClass
// The number of tags, if any, on the object.
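As a loose sketch of the flow described above, an Object Lambda handler transforms the object and hands it back through WriteGetObjectResponse. The route, token, and payload are hypothetical; in practice they come from the Object Lambda event's getObjectContext, which is outside the scope of this diff.

package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// respondTransformed writes a transformed object body back to the original
// GetObject caller. route and token are plain parameters here for illustration.
func respondTransformed(ctx context.Context, client *s3.Client, route, token, body string) error {
	_, err := client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
		RequestRoute:  aws.String(route),
		RequestToken:  aws.String(token),
		StatusCode:    aws.Int32(200),
		ContentLength: aws.Int64(int64(len(body))),
		Body:          strings.NewReader(body),
	})
	return err
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := respondTransformed(ctx, s3.NewFromConfig(cfg), "example-route", "example-token", "REDACTED CONTENT"); err != nil {
		log.Fatal(err)
	}
}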
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go
index 2be5df30ff..d953cdc1ca 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go
@@ -25,8 +25,17 @@ import (
"io/ioutil"
"strconv"
"strings"
+ "time"
)
+// deserializeS3Expires parses the Expires header as an HTTP date. Values that
+// fail to parse are tolerated rather than treated as an error: the typed
+// Expires field is simply left nil, and the raw header value is still
+// surfaced via ExpiresString.
+func deserializeS3Expires(v string) (*time.Time, error) {
+	t, err := smithytime.ParseHTTPDate(v)
+	if err != nil {
+		// Intentionally swallow the parse error; callers can fall back to the
+		// unparsed ExpiresString value set by the deserializers below.
+		return nil, nil
+	}
+	return &t, nil
+}
+
type awsRestxml_deserializeOpAbortMultipartUpload struct {
}
@@ -5504,12 +5513,17 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res
}
if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
- headerValues[0] = strings.TrimSpace(headerValues[0])
- t, err := smithytime.ParseHTTPDate(headerValues[0])
+ deserOverride, err := deserializeS3Expires(headerValues[0])
if err != nil {
return err
}
- v.Expires = ptr.Time(t)
+ v.Expires = deserOverride
+
+ }
+
+ if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ExpiresString = ptr.String(headerValues[0])
}
if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 {
@@ -7128,12 +7142,17 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r
}
if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
- headerValues[0] = strings.TrimSpace(headerValues[0])
- t, err := smithytime.ParseHTTPDate(headerValues[0])
+ deserOverride, err := deserializeS3Expires(headerValues[0])
if err != nil {
return err
}
- v.Expires = ptr.Time(t)
+ v.Expires = deserOverride
+
+ }
+
+ if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ExpiresString = ptr.String(headerValues[0])
}
if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 {
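The deserializer change above means an Expires header that is not a valid HTTP date no longer fails the whole response: the typed Expires field is left nil and the raw header is preserved in ExpiresString. A small consumer-side sketch (bucket and key are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String("amzn-example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	if out.Expires != nil {
		// The header was a valid HTTP date and parsed into a time.Time.
		fmt.Println("expires at:", *out.Expires)
	} else if out.ExpiresString != nil {
		// Fall back to the raw header captured by the new ExpiresString binding.
		fmt.Println("unparsed Expires header:", aws.ToString(out.ExpiresString))
	}
}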
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
index 04c6fdbb3e..d383412e95 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
@@ -3,4 +3,4 @@
package s3
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.53.1"
+const goModuleVersion = "1.54.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go
index 064bcefb4f..c5ab084d12 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go
@@ -65,8 +65,10 @@ type Options struct {
// Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
// value for this field will likely prevent you from using any endpoint-related
// service features released after the introduction of EndpointResolverV2 and
- // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom
- // endpoint, set the client option BaseEndpoint instead.
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
EndpointResolver EndpointResolver
// Resolves the endpoint used for a particular service operation. This should be
@@ -88,17 +90,20 @@ type Options struct {
// RetryMaxAttempts specifies the maximum number attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. If specified in an operation call's
- // functional options with a value that is different than the constructed client's
- // Options, the Client's Retryer will be wrapped to use the operation's specific
- // RetryMaxAttempts value.
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
RetryMaxAttempts int
// RetryMode specifies the retry mode the API client will be created with, if
- // Retryer option is not also specified. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. Currently does not support per operation call
- // overrides, may in the future.
+ // Retryer option is not also specified.
+ //
+	// When creating new API clients, this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+	// Currently does not support per operation call overrides, but may in the future.
RetryMode aws.RetryMode
// Retryer guides how HTTP requests should be retried in case of recoverable
@@ -141,8 +146,9 @@ type Options struct {
// The initial DefaultsMode used when the client options were constructed. If the
// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
- // value was at that point in time. Currently does not support per operation call
- // overrides, may in the future.
+ // value was at that point in time.
+ //
+	// Currently does not support per operation call overrides, but may in the future.
resolvedDefaultsMode aws.DefaultsMode
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
@@ -193,6 +199,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
// this field will likely prevent you from using any endpoint-related service
// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
// To migrate an EndpointResolver implementation that uses a custom endpoint, set
// the client option BaseEndpoint instead.
func WithEndpointResolver(v EndpointResolver) func(*Options) {
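Per the deprecation note above, a resolver that only pointed the client at a custom endpoint can usually be replaced by setting BaseEndpoint directly. A minimal sketch; the endpoint URL is a placeholder:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Instead of WithEndpointResolver, hand the custom endpoint to the client.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("https://s3.example.internal")
	})
	_ = client
}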
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
index 59524bdcbd..09334d4078 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
@@ -118,7 +118,7 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
- opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CompleteMultipartUpload")
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@@ -640,7 +640,7 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
- opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads&x-id=CreateMultipartUpload")
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@@ -1803,7 +1803,7 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
- opPath, opQuery := httpbinding.SplitURI("/?delete&x-id=DeleteObjects")
+ opPath, opQuery := httpbinding.SplitURI("/?delete")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@@ -7758,7 +7758,7 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
- opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore&x-id=RestoreObject")
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@@ -7866,7 +7866,7 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
- opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2&x-id=SelectObjectContent")
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
@@ -8341,7 +8341,7 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx conte
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
- opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse?x-id=WriteGetObjectResponse")
+ opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go
index ea3b9c82ac..bcb956b261 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go
@@ -11,6 +11,7 @@ const (
// Values returns all known values for AnalyticsS3ExportFileFormat. Note that this
// can be expanded in the future, and so it is only as up to date as the client.
+//
// The ordering of this slice is not guaranteed to be stable across updates.
func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat {
return []AnalyticsS3ExportFileFormat{
@@ -27,8 +28,9 @@ const (
)
// Values returns all known values for ArchiveStatus. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ArchiveStatus) Values() []ArchiveStatus {
return []ArchiveStatus{
"ARCHIVE_ACCESS",
@@ -45,8 +47,9 @@ const (
)
// Values returns all known values for BucketAccelerateStatus. Note that this can
-// be expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (BucketAccelerateStatus) Values() []BucketAccelerateStatus {
return []BucketAccelerateStatus{
"Enabled",
@@ -65,8 +68,9 @@ const (
)
// Values returns all known values for BucketCannedACL. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (BucketCannedACL) Values() []BucketCannedACL {
return []BucketCannedACL{
"private",
@@ -112,6 +116,7 @@ const (
// Values returns all known values for BucketLocationConstraint. Note that this
// can be expanded in the future, and so it is only as up to date as the client.
+//
// The ordering of this slice is not guaranteed to be stable across updates.
func (BucketLocationConstraint) Values() []BucketLocationConstraint {
return []BucketLocationConstraint{
@@ -156,8 +161,9 @@ const (
)
// Values returns all known values for BucketLogsPermission. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (BucketLogsPermission) Values() []BucketLogsPermission {
return []BucketLogsPermission{
"FULL_CONTROL",
@@ -174,8 +180,9 @@ const (
)
// Values returns all known values for BucketType. Note that this can be expanded
-// in the future, and so it is only as up to date as the client. The ordering of
-// this slice is not guaranteed to be stable across updates.
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (BucketType) Values() []BucketType {
return []BucketType{
"Directory",
@@ -191,8 +198,9 @@ const (
)
// Values returns all known values for BucketVersioningStatus. Note that this can
-// be expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (BucketVersioningStatus) Values() []BucketVersioningStatus {
return []BucketVersioningStatus{
"Enabled",
@@ -211,8 +219,9 @@ const (
)
// Values returns all known values for ChecksumAlgorithm. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ChecksumAlgorithm) Values() []ChecksumAlgorithm {
return []ChecksumAlgorithm{
"CRC32",
@@ -230,8 +239,9 @@ const (
)
// Values returns all known values for ChecksumMode. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ChecksumMode) Values() []ChecksumMode {
return []ChecksumMode{
"ENABLED",
@@ -248,8 +258,9 @@ const (
)
// Values returns all known values for CompressionType. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (CompressionType) Values() []CompressionType {
return []CompressionType{
"NONE",
@@ -266,8 +277,9 @@ const (
)
// Values returns all known values for DataRedundancy. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (DataRedundancy) Values() []DataRedundancy {
return []DataRedundancy{
"SingleAvailabilityZone",
@@ -284,8 +296,9 @@ const (
// Values returns all known values for DeleteMarkerReplicationStatus. Note that
// this can be expanded in the future, and so it is only as up to date as the
-// client. The ordering of this slice is not guaranteed to be stable across
-// updates.
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus {
return []DeleteMarkerReplicationStatus{
"Enabled",
@@ -301,8 +314,9 @@ const (
)
// Values returns all known values for EncodingType. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (EncodingType) Values() []EncodingType {
return []EncodingType{
"url",
@@ -343,8 +357,9 @@ const (
)
// Values returns all known values for Event. Note that this can be expanded in
-// the future, and so it is only as up to date as the client. The ordering of this
-// slice is not guaranteed to be stable across updates.
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (Event) Values() []Event {
return []Event{
"s3:ReducedRedundancyLostObject",
@@ -387,8 +402,9 @@ const (
// Values returns all known values for ExistingObjectReplicationStatus. Note that
// this can be expanded in the future, and so it is only as up to date as the
-// client. The ordering of this slice is not guaranteed to be stable across
-// updates.
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus {
return []ExistingObjectReplicationStatus{
"Enabled",
@@ -405,8 +421,9 @@ const (
)
// Values returns all known values for ExpirationStatus. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ExpirationStatus) Values() []ExpirationStatus {
return []ExpirationStatus{
"Enabled",
@@ -422,8 +439,9 @@ const (
)
// Values returns all known values for ExpressionType. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ExpressionType) Values() []ExpressionType {
return []ExpressionType{
"SQL",
@@ -440,8 +458,9 @@ const (
)
// Values returns all known values for FileHeaderInfo. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (FileHeaderInfo) Values() []FileHeaderInfo {
return []FileHeaderInfo{
"USE",
@@ -459,8 +478,9 @@ const (
)
// Values returns all known values for FilterRuleName. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (FilterRuleName) Values() []FilterRuleName {
return []FilterRuleName{
"prefix",
@@ -478,8 +498,9 @@ const (
// Values returns all known values for IntelligentTieringAccessTier. Note that
// this can be expanded in the future, and so it is only as up to date as the
-// client. The ordering of this slice is not guaranteed to be stable across
-// updates.
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier {
return []IntelligentTieringAccessTier{
"ARCHIVE_ACCESS",
@@ -497,6 +518,7 @@ const (
// Values returns all known values for IntelligentTieringStatus. Note that this
// can be expanded in the future, and so it is only as up to date as the client.
+//
// The ordering of this slice is not guaranteed to be stable across updates.
func (IntelligentTieringStatus) Values() []IntelligentTieringStatus {
return []IntelligentTieringStatus{
@@ -515,8 +537,9 @@ const (
)
// Values returns all known values for InventoryFormat. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (InventoryFormat) Values() []InventoryFormat {
return []InventoryFormat{
"CSV",
@@ -534,8 +557,9 @@ const (
)
// Values returns all known values for InventoryFrequency. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (InventoryFrequency) Values() []InventoryFrequency {
return []InventoryFrequency{
"Daily",
@@ -553,8 +577,9 @@ const (
// Values returns all known values for InventoryIncludedObjectVersions. Note that
// this can be expanded in the future, and so it is only as up to date as the
-// client. The ordering of this slice is not guaranteed to be stable across
-// updates.
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions {
return []InventoryIncludedObjectVersions{
"All",
@@ -584,8 +609,9 @@ const (
)
// Values returns all known values for InventoryOptionalField. Note that this can
-// be expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (InventoryOptionalField) Values() []InventoryOptionalField {
return []InventoryOptionalField{
"Size",
@@ -615,8 +641,9 @@ const (
)
// Values returns all known values for JSONType. Note that this can be expanded in
-// the future, and so it is only as up to date as the client. The ordering of this
-// slice is not guaranteed to be stable across updates.
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (JSONType) Values() []JSONType {
return []JSONType{
"DOCUMENT",
@@ -632,8 +659,9 @@ const (
)
// Values returns all known values for LocationType. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (LocationType) Values() []LocationType {
return []LocationType{
"AvailabilityZone",
@@ -649,8 +677,9 @@ const (
)
// Values returns all known values for MetadataDirective. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (MetadataDirective) Values() []MetadataDirective {
return []MetadataDirective{
"COPY",
@@ -667,8 +696,9 @@ const (
)
// Values returns all known values for MetricsStatus. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (MetricsStatus) Values() []MetricsStatus {
return []MetricsStatus{
"Enabled",
@@ -685,8 +715,9 @@ const (
)
// Values returns all known values for MFADelete. Note that this can be expanded
-// in the future, and so it is only as up to date as the client. The ordering of
-// this slice is not guaranteed to be stable across updates.
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (MFADelete) Values() []MFADelete {
return []MFADelete{
"Enabled",
@@ -703,8 +734,9 @@ const (
)
// Values returns all known values for MFADeleteStatus. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (MFADeleteStatus) Values() []MFADeleteStatus {
return []MFADeleteStatus{
"Enabled",
@@ -724,8 +756,9 @@ const (
)
// Values returns all known values for ObjectAttributes. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ObjectAttributes) Values() []ObjectAttributes {
return []ObjectAttributes{
"ETag",
@@ -750,8 +783,9 @@ const (
)
// Values returns all known values for ObjectCannedACL. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ObjectCannedACL) Values() []ObjectCannedACL {
return []ObjectCannedACL{
"private",
@@ -772,8 +806,9 @@ const (
)
// Values returns all known values for ObjectLockEnabled. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ObjectLockEnabled) Values() []ObjectLockEnabled {
return []ObjectLockEnabled{
"Enabled",
@@ -790,6 +825,7 @@ const (
// Values returns all known values for ObjectLockLegalHoldStatus. Note that this
// can be expanded in the future, and so it is only as up to date as the client.
+//
// The ordering of this slice is not guaranteed to be stable across updates.
func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus {
return []ObjectLockLegalHoldStatus{
@@ -807,8 +843,9 @@ const (
)
// Values returns all known values for ObjectLockMode. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ObjectLockMode) Values() []ObjectLockMode {
return []ObjectLockMode{
"GOVERNANCE",
@@ -825,8 +862,9 @@ const (
)
// Values returns all known values for ObjectLockRetentionMode. Note that this can
-// be expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode {
return []ObjectLockRetentionMode{
"GOVERNANCE",
@@ -844,8 +882,9 @@ const (
)
// Values returns all known values for ObjectOwnership. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ObjectOwnership) Values() []ObjectOwnership {
return []ObjectOwnership{
"BucketOwnerPreferred",
@@ -872,8 +911,9 @@ const (
)
// Values returns all known values for ObjectStorageClass. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ObjectStorageClass) Values() []ObjectStorageClass {
return []ObjectStorageClass{
"STANDARD",
@@ -899,6 +939,7 @@ const (
// Values returns all known values for ObjectVersionStorageClass. Note that this
// can be expanded in the future, and so it is only as up to date as the client.
+//
// The ordering of this slice is not guaranteed to be stable across updates.
func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass {
return []ObjectVersionStorageClass{
@@ -915,6 +956,7 @@ const (
// Values returns all known values for OptionalObjectAttributes. Note that this
// can be expanded in the future, and so it is only as up to date as the client.
+//
// The ordering of this slice is not guaranteed to be stable across updates.
func (OptionalObjectAttributes) Values() []OptionalObjectAttributes {
return []OptionalObjectAttributes{
@@ -930,8 +972,9 @@ const (
)
// Values returns all known values for OwnerOverride. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (OwnerOverride) Values() []OwnerOverride {
return []OwnerOverride{
"Destination",
@@ -947,8 +990,9 @@ const (
)
// Values returns all known values for PartitionDateSource. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (PartitionDateSource) Values() []PartitionDateSource {
return []PartitionDateSource{
"EventTime",
@@ -965,8 +1009,9 @@ const (
)
// Values returns all known values for Payer. Note that this can be expanded in
-// the future, and so it is only as up to date as the client. The ordering of this
-// slice is not guaranteed to be stable across updates.
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (Payer) Values() []Payer {
return []Payer{
"Requester",
@@ -986,8 +1031,9 @@ const (
)
// Values returns all known values for Permission. Note that this can be expanded
-// in the future, and so it is only as up to date as the client. The ordering of
-// this slice is not guaranteed to be stable across updates.
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (Permission) Values() []Permission {
return []Permission{
"FULL_CONTROL",
@@ -1007,8 +1053,9 @@ const (
)
// Values returns all known values for Protocol. Note that this can be expanded in
-// the future, and so it is only as up to date as the client. The ordering of this
-// slice is not guaranteed to be stable across updates.
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (Protocol) Values() []Protocol {
return []Protocol{
"http",
@@ -1025,8 +1072,9 @@ const (
)
// Values returns all known values for QuoteFields. Note that this can be expanded
-// in the future, and so it is only as up to date as the client. The ordering of
-// this slice is not guaranteed to be stable across updates.
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (QuoteFields) Values() []QuoteFields {
return []QuoteFields{
"ALWAYS",
@@ -1044,6 +1092,7 @@ const (
// Values returns all known values for ReplicaModificationsStatus. Note that this
// can be expanded in the future, and so it is only as up to date as the client.
+//
// The ordering of this slice is not guaranteed to be stable across updates.
func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus {
return []ReplicaModificationsStatus{
@@ -1061,8 +1110,9 @@ const (
)
// Values returns all known values for ReplicationRuleStatus. Note that this can
-// be expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ReplicationRuleStatus) Values() []ReplicationRuleStatus {
return []ReplicationRuleStatus{
"Enabled",
@@ -1082,8 +1132,9 @@ const (
)
// Values returns all known values for ReplicationStatus. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ReplicationStatus) Values() []ReplicationStatus {
return []ReplicationStatus{
"COMPLETE",
@@ -1103,8 +1154,9 @@ const (
)
// Values returns all known values for ReplicationTimeStatus. Note that this can
-// be expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ReplicationTimeStatus) Values() []ReplicationTimeStatus {
return []ReplicationTimeStatus{
"Enabled",
@@ -1120,8 +1172,9 @@ const (
)
// Values returns all known values for RequestCharged. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (RequestCharged) Values() []RequestCharged {
return []RequestCharged{
"requester",
@@ -1136,8 +1189,9 @@ const (
)
// Values returns all known values for RequestPayer. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (RequestPayer) Values() []RequestPayer {
return []RequestPayer{
"requester",
@@ -1152,8 +1206,9 @@ const (
)
// Values returns all known values for RestoreRequestType. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (RestoreRequestType) Values() []RestoreRequestType {
return []RestoreRequestType{
"SELECT",
@@ -1170,8 +1225,9 @@ const (
)
// Values returns all known values for ServerSideEncryption. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ServerSideEncryption) Values() []ServerSideEncryption {
return []ServerSideEncryption{
"AES256",
@@ -1189,8 +1245,9 @@ const (
)
// Values returns all known values for SessionMode. Note that this can be expanded
-// in the future, and so it is only as up to date as the client. The ordering of
-// this slice is not guaranteed to be stable across updates.
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (SessionMode) Values() []SessionMode {
return []SessionMode{
"ReadOnly",
@@ -1208,8 +1265,9 @@ const (
// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that
// this can be expanded in the future, and so it is only as up to date as the
-// client. The ordering of this slice is not guaranteed to be stable across
-// updates.
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus {
return []SseKmsEncryptedObjectsStatus{
"Enabled",
@@ -1235,8 +1293,9 @@ const (
)
// Values returns all known values for StorageClass. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (StorageClass) Values() []StorageClass {
return []StorageClass{
"STANDARD",
@@ -1262,8 +1321,9 @@ const (
// Values returns all known values for StorageClassAnalysisSchemaVersion. Note
// that this can be expanded in the future, and so it is only as up to date as the
-// client. The ordering of this slice is not guaranteed to be stable across
-// updates.
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion {
return []StorageClassAnalysisSchemaVersion{
"V_1",
@@ -1279,8 +1339,9 @@ const (
)
// Values returns all known values for TaggingDirective. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (TaggingDirective) Values() []TaggingDirective {
return []TaggingDirective{
"COPY",
@@ -1298,8 +1359,9 @@ const (
)
// Values returns all known values for Tier. Note that this can be expanded in the
-// future, and so it is only as up to date as the client. The ordering of this
-// slice is not guaranteed to be stable across updates.
+// future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (Tier) Values() []Tier {
return []Tier{
"Standard",
@@ -1321,8 +1383,9 @@ const (
)
// Values returns all known values for TransitionStorageClass. Note that this can
-// be expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (TransitionStorageClass) Values() []TransitionStorageClass {
return []TransitionStorageClass{
"GLACIER",
@@ -1344,8 +1407,9 @@ const (
)
// Values returns all known values for Type. Note that this can be expanded in the
-// future, and so it is only as up to date as the client. The ordering of this
-// slice is not guaranteed to be stable across updates.
+// future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (Type) Values() []Type {
return []Type{
"CanonicalUser",
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go
index 166484f4ec..a01b922f73 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go
@@ -64,14 +64,17 @@ func (e *BucketAlreadyOwnedByYou) ErrorCode() string {
}
func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
-// Object is archived and inaccessible until restored. If the object you are
-// retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3
-// Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access
-// tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can
-// retrieve the object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
-// . Otherwise, this operation returns an InvalidObjectState error. For
-// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
-// in the Amazon S3 User Guide.
+// Object is archived and inaccessible until restored.
+//
+// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval
+// storage class, the S3 Glacier Deep Archive storage class, the S3
+// Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep
+// Archive Access tier, before you can retrieve the object you must first restore a
+// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For
+// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide.
+//
+// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html
type InvalidObjectState struct {
Message *string
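The rewritten InvalidObjectState comment above describes the error returned when an archived object is fetched before being restored. A hedged sketch of handling it with errors.As (the s3util package name, client construction, bucket, and key are placeholders):

package s3util

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// fetchObject wraps GetObject and surfaces a clearer error when the object is
// archived and must be restored (via RestoreObject) before it can be read.
func fetchObject(ctx context.Context, client *s3.Client, bucket, key string) (*s3.GetObjectOutput, error) {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		var ios *types.InvalidObjectState
		if errors.As(err, &ios) {
			return nil, fmt.Errorf("object %s/%s is archived; restore it with RestoreObject first: %w", bucket, key, err)
		}
		return nil, err
	}
	return out, nil
}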
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go
index 4299b57cc6..08225e62f2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go
@@ -9,9 +9,9 @@ import (
// Specifies the days since the initiation of an incomplete multipart upload that
// Amazon S3 will wait before permanently removing all parts of the upload. For
-// more information, see Aborting Incomplete Multipart Uploads Using a Bucket
-// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
-// in the Amazon S3 User Guide.
+// more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide.
+//
+// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
type AbortIncompleteMultipartUpload struct {
// Specifies the number of days after which Amazon S3 aborts an incomplete
@@ -22,8 +22,9 @@ type AbortIncompleteMultipartUpload struct {
}
// Configures the transfer acceleration state for an Amazon S3 bucket. For more
-// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
-// in the Amazon S3 User Guide.
+// information, see [Amazon S3 Transfer Acceleration]in the Amazon S3 User Guide.
+//
+// [Amazon S3 Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
type AccelerateConfiguration struct {
// Specifies the transfer acceleration status of the bucket.
@@ -47,9 +48,10 @@ type AccessControlPolicy struct {
// A container for information about access control for replicas.
type AccessControlTranslation struct {
- // Specifies the replica ownership. For default and valid values, see PUT bucket
- // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
- // in the Amazon S3 API Reference.
+ // Specifies the replica ownership. For default and valid values, see [PUT bucket replication] in the
+ // Amazon S3 API Reference.
+ //
+ // [PUT bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html
//
// This member is required.
Owner OwnerOverride
@@ -82,7 +84,7 @@ type AnalyticsConfiguration struct {
// This member is required.
Id *string
- // Contains data related to access patterns to be collected and made available to
+ // Contains data related to access patterns to be collected and made available to
// analyze the tradeoffs between different storage classes.
//
// This member is required.
@@ -162,9 +164,10 @@ type AnalyticsS3BucketDestination struct {
Format AnalyticsS3ExportFileFormat
// The account ID that owns the destination S3 bucket. If no account ID is
- // provided, the owner is not validated before exporting data. Although this value
- // is optional, we strongly recommend that you set it to help prevent problems if
- // the destination bucket ownership changes.
+ // provided, the owner is not validated before exporting data.
+ //
+ // Although this value is optional, we strongly recommend that you set it to help
+ // prevent problems if the destination bucket ownership changes.
BucketAccountId *string
// The prefix to use when exporting data. The prefix is prepended to all results.
@@ -187,9 +190,11 @@ type Bucket struct {
}
// Specifies the information about the bucket that will be created. For more
-// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
-// in the Amazon S3 User Guide. This functionality is only supported by directory
-// buckets.
+// information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+//
+// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
type BucketInfo struct {
// The number of Availability Zones used for redundancy for the bucket.
@@ -202,8 +207,9 @@ type BucketInfo struct {
}
// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For
-// more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
-// in the Amazon S3 User Guide.
+// more information, see [Object Lifecycle Management]in the Amazon S3 User Guide.
+//
+// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
type BucketLifecycleConfiguration struct {
// A lifecycle rule for individual objects in an Amazon S3 bucket.
@@ -218,8 +224,10 @@ type BucketLifecycleConfiguration struct {
type BucketLoggingStatus struct {
// Describes where logs are stored and the prefix that Amazon S3 assigns to all
- // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
- // in the Amazon S3 API Reference.
+ // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API
+ // Reference.
+ //
+ // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html
LoggingEnabled *LoggingEnabled
noSmithyDocumentSerde
@@ -233,8 +241,10 @@ type Checksum struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
@@ -242,8 +252,10 @@ type Checksum struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
@@ -251,8 +263,10 @@ type Checksum struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
@@ -260,8 +274,10 @@ type Checksum struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA256 *string
noSmithyDocumentSerde
@@ -283,8 +299,10 @@ type CommonPrefix struct {
// The container for the completed multipart upload details.
type CompletedMultipartUpload struct {
- // Array of CompletedPart data types. If you do not supply a valid Part with your
- // request, the service sends back an HTTP 400 response.
+ // Array of CompletedPart data types.
+ //
+ // If you do not supply a valid Part with your request, the service sends back an
+ // HTTP 400 response.
Parts []CompletedPart
noSmithyDocumentSerde
@@ -298,8 +316,10 @@ type CompletedPart struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
@@ -307,8 +327,10 @@ type CompletedPart struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
@@ -316,8 +338,10 @@ type CompletedPart struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
@@ -325,8 +349,10 @@ type CompletedPart struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA256 *string
// Entity tag returned when the part was uploaded.
@@ -334,12 +360,14 @@ type CompletedPart struct {
// Part number that identifies the part. This is a positive integer between 1 and
// 10,000.
+ //
// - General purpose buckets - In CompleteMultipartUpload , when an additional
// checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c ,
// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the
// PartNumber must start at 1 and the part numbers must be consecutive.
// Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an
// InvalidPartOrder error code.
+ //
// - Directory buckets - In CompleteMultipartUpload , the PartNumber must start
// at 1 and the part numbers must be consecutive.
PartNumber *int32
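The CompletedPart comments above (consecutive part numbers starting at 1, ETags from each uploaded part) translate into a CompleteMultipartUpload call roughly like the sketch below; it assumes the same aws, s3, and types imports as the earlier example, and the etags slice is gathered elsewhere:

// completeUpload finishes a multipart upload from previously collected part
// ETags. PartNumber values are consecutive and start at 1, as required above.
func completeUpload(ctx context.Context, client *s3.Client, bucket, key, uploadID string, etags []string) error {
	parts := make([]types.CompletedPart, 0, len(etags))
	for i, etag := range etags {
		parts = append(parts, types.CompletedPart{
			ETag:       aws.String(etag),
			PartNumber: aws.Int32(int32(i + 1)),
		})
	}
	_, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{Parts: parts},
	})
	return err
}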
@@ -366,10 +394,12 @@ type Condition struct {
// be /docs , which identifies all objects in the docs/ folder. Required when the
// parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
// is not specified. If both conditions are specified, both must be true for the
- // redirect to be applied. Replacement must be made for object keys containing
- // special characters (such as carriage returns) when using XML requests. For more
- // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
- // .
+ // redirect to be applied.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
KeyPrefixEquals *string
noSmithyDocumentSerde
@@ -383,27 +413,31 @@ type ContinuationEvent struct {
type CopyObjectResult struct {
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ // Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
- // present if it was uploaded with the object. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ // Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ // Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
- // present if it was uploaded with the object. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide.
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]in the
+ // Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA256 *string
// Returns the ETag of the new object. The ETag reflects only changes to the
@@ -424,8 +458,10 @@ type CopyPartResult struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
@@ -433,8 +469,10 @@ type CopyPartResult struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
@@ -442,8 +480,10 @@ type CopyPartResult struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
@@ -451,8 +491,10 @@ type CopyPartResult struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA256 *string
// Entity tag of the object.
@@ -465,8 +507,9 @@ type CopyPartResult struct {
}
// Describes the cross-origin access configuration for objects in an Amazon S3
-// bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
-// in the Amazon S3 User Guide.
+// bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide.
+//
+// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
type CORSConfiguration struct {
// A set of origins and methods (cross-origin access that you want to allow). You
@@ -515,23 +558,30 @@ type CORSRule struct {
// The configuration information for the bucket.
type CreateBucketConfiguration struct {
- // Specifies the information about the bucket that will be created. This
- // functionality is only supported by directory buckets.
+ // Specifies the information about the bucket that will be created.
+ //
+ // This functionality is only supported by directory buckets.
Bucket *BucketInfo
- // Specifies the location where the bucket will be created. For directory buckets,
- // the location type is Availability Zone. This functionality is only supported by
- // directory buckets.
+ // Specifies the location where the bucket will be created.
+ //
+ // For directory buckets, the location type is Availability Zone.
+ //
+ // This functionality is only supported by directory buckets.
Location *LocationInfo
// Specifies the Region where the bucket will be created. You might choose a
// Region to optimize latency, minimize costs, or address regulatory requirements.
// For example, if you reside in Europe, you will probably find it advantageous to
- // create buckets in the Europe (Ireland) Region. For more information, see
- // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
- // in the Amazon S3 User Guide. If you don't specify a Region, the bucket is
- // created in the US East (N. Virginia) Region (us-east-1) by default. This
- // functionality is not supported for directory buckets.
+ // create buckets in the Europe (Ireland) Region. For more information, see [Accessing a bucket]in the
+ // Amazon S3 User Guide.
+ //
+ // If you don't specify a Region, the bucket is created in the US East (N.
+ // Virginia) Region (us-east-1) by default.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Accessing a bucket]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
LocationConstraint BucketLocationConstraint
noSmithyDocumentSerde
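As a rough illustration of the CreateBucketConfiguration fields described above (same imports as the earlier sketches; the Region constant is only an example, and the directory-bucket fields are omitted):

// createRegionalBucket creates a general purpose bucket outside us-east-1 by
// supplying a LocationConstraint, per the comment above.
func createRegionalBucket(ctx context.Context, client *s3.Client, name string) error {
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String(name),
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraintEuWest1, // example Region
		},
	})
	return err
}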
@@ -548,7 +598,9 @@ type CSVInput struct {
// A single character used to indicate that a row should be ignored when the
// character is present at the start of that row. You can specify any character to
- // indicate a comment line. The default character is # . Default: #
+ // indicate a comment line. The default character is # .
+ //
+ // Default: #
Comments *string
// A single character used to separate individual fields in a record. You can
@@ -556,17 +608,26 @@ type CSVInput struct {
FieldDelimiter *string
// Describes the first line of input. Valid values are:
+ //
// - NONE : First line is not a header.
+ //
// - IGNORE : First line is a header, but you can't use the header values to
// indicate the column in an expression. You can use column position (such as _1,
// _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ).
+ //
// - Use : First line is a header, and you can use the header value to identify a
// column in an expression ( SELECT "name" FROM OBJECT ).
FileHeaderInfo FileHeaderInfo
// A single character used for escaping when the field delimiter is part of the
// value. For example, if the value is a, b , Amazon S3 wraps this field value in
- // quotation marks, as follows: " a , b " . Type: String Default: " Ancestors: CSV
+ // quotation marks, as follows: " a , b " .
+ //
+ // Type: String
+ //
+ // Default: "
+ //
+ // Ancestors: CSV
QuoteCharacter *string
// A single character used for escaping the quotation mark character inside an
@@ -599,7 +660,9 @@ type CSVOutput struct {
QuoteEscapeCharacter *string
// Indicates whether to use quotation marks around output fields.
+ //
// - ALWAYS : Always use quotation marks for output fields.
+ //
// - ASNEEDED : Use quotation marks for output fields when needed.
QuoteFields QuoteFields
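A hedged sketch of how the CSVInput and CSVOutput options above feed into a SelectObjectContent request (same imports as the earlier sketches; the SQL expression and option values are illustrative only):

// selectCSV runs an S3 Select query against a CSV object whose first line is a
// usable header and whose comment lines start with '#'.
func selectCSV(ctx context.Context, client *s3.Client, bucket, key string) (*s3.SelectObjectContentOutput, error) {
	return client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String(bucket),
		Key:            aws.String(key),
		Expression:     aws.String(`SELECT "name" FROM S3Object s`),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{
				FileHeaderInfo: types.FileHeaderInfoUse,
				Comments:       aws.String("#"),
			},
		},
		OutputSerialization: &types.OutputSerialization{
			CSV: &types.CSVOutput{QuoteFields: types.QuoteFieldsAsneeded},
		},
	})
}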
@@ -612,7 +675,9 @@ type CSVOutput struct {
// The container element for specifying the default Object Lock retention settings
// for new objects placed in the specified bucket.
+//
// - The DefaultRetention settings require both a mode and a period.
+//
// - The DefaultRetention period can be either Days or Years but you must select
// one. You cannot specify Days and Years at the same time.
type DefaultRetention struct {
@@ -635,10 +700,12 @@ type DefaultRetention struct {
// Container for the objects to delete.
type Delete struct {
- // The object to delete. Directory buckets - For directory buckets, an object
- // that's composed entirely of whitespace characters is not supported by the
- // DeleteObjects API operation. The request will receive a 400 Bad Request error
- // and none of the objects in the request will be deleted.
+ // The object to delete.
+ //
+ // Directory buckets - For directory buckets, an object that's composed entirely
+ // of whitespace characters is not supported by the DeleteObjects API operation.
+ // The request will receive a 400 Bad Request error and none of the objects in the
+ // request will be deleted.
//
// This member is required.
Objects []ObjectIdentifier
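A short sketch of the Delete/ObjectIdentifier usage implied above for a batch DeleteObjects call (same imports as the earlier sketches; note the directory-bucket whitespace restriction described in the comment):

// deleteKeys deletes a batch of object keys in one DeleteObjects request.
func deleteKeys(ctx context.Context, client *s3.Client, bucket string, keys []string) error {
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}
	_, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &types.Delete{Objects: ids, Quiet: aws.Bool(true)}, // Quiet suppresses per-key results
	})
	return err
}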
@@ -656,21 +723,24 @@ type DeletedObject struct {
// Indicates whether the specified object version that was permanently deleted was
// (true) or was not (false) a delete marker before deletion. In a simple DELETE,
// this header indicates whether (true) or not (false) the current version of the
- // object is a delete marker. This functionality is not supported for directory
- // buckets.
+ // object is a delete marker.
+ //
+ // This functionality is not supported for directory buckets.
DeleteMarker *bool
// The version ID of the delete marker created as a result of the DELETE
// operation. If you delete a specific object version, the value returned by this
- // header is the version ID of the object version deleted. This functionality is
- // not supported for directory buckets.
+ // header is the version ID of the object version deleted.
+ //
+ // This functionality is not supported for directory buckets.
DeleteMarkerVersionId *string
// The name of the deleted object.
Key *string
- // The version ID of the deleted object. This functionality is not supported for
- // directory buckets.
+ // The version ID of the deleted object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
noSmithyDocumentSerde
@@ -703,17 +773,20 @@ type DeleteMarkerEntry struct {
// DeleteMarkerReplication element. If your Filter includes a Tag element, the
// DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does
// not support replicating delete markers for tag-based rules. For an example
-// configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config)
-// . For more information about delete marker replication, see Basic Rule
-// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html)
-// . If you are using an earlier version of the replication configuration, Amazon
-// S3 handles replication of delete markers differently. For more information, see
-// Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations)
-// .
+// configuration, see [Basic Rule Configuration].
+//
+// For more information about delete marker replication, see [Basic Rule Configuration].
+//
+// If you are using an earlier version of the replication configuration, Amazon S3
+// handles replication of delete markers differently. For more information, see [Backward Compatibility].
+//
+// [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html
+// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations
type DeleteMarkerReplication struct {
- // Indicates whether to replicate delete markers. Indicates whether to replicate
- // delete markers.
+ // Indicates whether to replicate delete markers.
Status DeleteMarkerReplicationStatus
noSmithyDocumentSerde
@@ -723,7 +796,7 @@ type DeleteMarkerReplication struct {
// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
type Destination struct {
- // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+ // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
// the results.
//
// This member is required.
@@ -740,29 +813,32 @@ type Destination struct {
// Amazon S3 to change replica ownership to the Amazon Web Services account that
// owns the destination bucket by specifying the AccessControlTranslation
// property, this is the account ID of the destination bucket owner. For more
- // information, see Replication Additional Configuration: Changing the Replica
- // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html)
- // in the Amazon S3 User Guide.
+ // information, see [Replication Additional Configuration: Changing the Replica Owner]in the Amazon S3 User Guide.
+ //
+ // [Replication Additional Configuration: Changing the Replica Owner]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html
Account *string
// A container that provides information about encryption. If
// SourceSelectionCriteria is specified, you must specify this element.
EncryptionConfiguration *EncryptionConfiguration
- // A container specifying replication metrics-related settings enabling
+ // A container specifying replication metrics-related settings enabling
// replication metrics and events.
Metrics *Metrics
- // A container specifying S3 Replication Time Control (S3 RTC), including whether
+ // A container specifying S3 Replication Time Control (S3 RTC), including whether
// S3 RTC is enabled and the time when all objects and operations on objects must
// be replicated. Must be specified together with a Metrics block.
ReplicationTime *ReplicationTime
- // The storage class to use when replicating objects, such as S3 Standard or
+ // The storage class to use when replicating objects, such as S3 Standard or
// reduced redundancy. By default, Amazon S3 uses the storage class of the source
- // object to create the object replica. For valid values, see the StorageClass
- // element of the PUT Bucket replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
- // action in the Amazon S3 API Reference.
+ // object to create the object replica.
+ //
+ // For valid values, see the StorageClass element of the [PUT Bucket replication] action in the Amazon S3
+ // API Reference.
+ //
+ // [PUT Bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html
StorageClass StorageClass
noSmithyDocumentSerde
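For orientation, a minimal replication Destination built from the fields documented above (the bucket ARN, account ID, and storage class are placeholders; same types import as the earlier sketches):

// exampleDestination shows the Destination fields described above; it would be
// embedded in a ReplicationRule within a bucket replication configuration.
func exampleDestination() types.Destination {
	return types.Destination{
		Bucket:       aws.String("arn:aws:s3:::example-replica-bucket"),
		Account:      aws.String("111122223333"),
		StorageClass: types.StorageClassStandard,
		AccessControlTranslation: &types.AccessControlTranslation{
			Owner: types.OwnerOverrideDestination,
		},
	}
}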
@@ -784,8 +860,9 @@ type Encryption struct {
// If the encryption type is aws:kms , this optional value specifies the ID of the
// symmetric encryption customer managed key to use for encryption of job results.
// Amazon S3 only supports symmetric encryption KMS keys. For more information, see
- // Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
- // in the Amazon Web Services Key Management Service Developer Guide.
+ // [Asymmetric keys in KMS]in the Amazon Web Services Key Management Service Developer Guide.
+ //
+ // [Asymmetric keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
KMSKeyId *string
noSmithyDocumentSerde
@@ -799,8 +876,9 @@ type EncryptionConfiguration struct {
// Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
// the destination bucket. Amazon S3 uses this key to encrypt replica objects.
// Amazon S3 only supports symmetric encryption KMS keys. For more information, see
- // Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
- // in the Amazon Web Services Key Management Service Developer Guide.
+ // [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide.
+ //
+ // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
ReplicaKmsKeyID *string
noSmithyDocumentSerde
@@ -819,414 +897,766 @@ type Error struct {
// The error code is a string that uniquely identifies an error condition. It is
// meant to be read and understood by programs that detect and handle errors by
// type. The following is a list of Amazon S3 error codes. For more information,
- // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
- // .
+ // see [Error responses].
+ //
// - Code: AccessDenied
+ //
// - Description: Access Denied
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: AccountProblem
+ //
// - Description: There is a problem with your Amazon Web Services account that
// prevents the action from completing successfully. Contact Amazon Web Services
// Support for further assistance.
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: AllAccessDisabled
+ //
// - Description: All access to this Amazon S3 resource has been disabled.
// Contact Amazon Web Services Support for further assistance.
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: AmbiguousGrantByEmailAddress
+ //
// - Description: The email address you provided is associated with more than
// one account.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: AuthorizationHeaderMalformed
+ //
// - Description: The authorization header you provided is invalid.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - HTTP Status Code: N/A
+ //
// - Code: BadDigest
+ //
// - Description: The Content-MD5 you specified did not match what we received.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: BucketAlreadyExists
+ //
// - Description: The requested bucket name is not available. The bucket
// namespace is shared by all users of the system. Please select a different name
// and try again.
+ //
// - HTTP Status Code: 409 Conflict
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: BucketAlreadyOwnedByYou
+ //
// - Description: The bucket you tried to create already exists, and you own it.
// Amazon S3 returns this error in all Amazon Web Services Regions except in the
// North Virginia Region. For legacy compatibility, if you re-create an existing
// bucket that you already own in the North Virginia Region, Amazon S3 returns 200
// OK and resets the bucket access control lists (ACLs).
+ //
// - Code: 409 Conflict (in all Regions except the North Virginia Region)
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: BucketNotEmpty
+ //
// - Description: The bucket you tried to delete is not empty.
+ //
// - HTTP Status Code: 409 Conflict
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: CredentialsNotSupported
+ //
// - Description: This request does not support credentials.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: CrossLocationLoggingProhibited
+ //
// - Description: Cross-location logging not allowed. Buckets in one geographic
// location cannot log information to a bucket in another location.
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: EntityTooSmall
+ //
// - Description: Your proposed upload is smaller than the minimum allowed
// object size.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: EntityTooLarge
+ //
// - Description: Your proposed upload exceeds the maximum allowed object size.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: ExpiredToken
+ //
// - Description: The provided token has expired.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: IllegalVersioningConfigurationException
+ //
// - Description: Indicates that the versioning configuration specified in the
// request is invalid.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: IncompleteBody
+ //
// - Description: You did not provide the number of bytes specified by the
// Content-Length HTTP header
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: IncorrectNumberOfFilesInPostRequest
+ //
// - Description: POST requires exactly one file upload per request.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InlineDataTooLarge
+ //
// - Description: Inline data exceeds the maximum allowed size.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InternalError
+ //
// - Description: We encountered an internal error. Please try again.
+ //
// - HTTP Status Code: 500 Internal Server Error
+ //
// - SOAP Fault Code Prefix: Server
+ //
// - Code: InvalidAccessKeyId
+ //
// - Description: The Amazon Web Services access key ID you provided does not
// exist in our records.
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidAddressingHeader
+ //
// - Description: You must specify the Anonymous role.
+ //
// - HTTP Status Code: N/A
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidArgument
+ //
// - Description: Invalid Argument
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidBucketName
+ //
// - Description: The specified bucket is not valid.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidBucketState
+ //
// - Description: The request is not valid with the current state of the bucket.
+ //
// - HTTP Status Code: 409 Conflict
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidDigest
+ //
// - Description: The Content-MD5 you specified is not valid.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidEncryptionAlgorithmError
+ //
// - Description: The encryption request you specified is not valid. The valid
// value is AES256.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidLocationConstraint
+ //
// - Description: The specified location constraint is not valid. For more
- // information about Regions, see How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
- // .
+ // information about Regions, see [How to Select a Region for Your Buckets].
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidObjectState
+ //
// - Description: The action is not valid for the current state of the object.
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidPart
+ //
// - Description: One or more of the specified parts could not be found. The
// part might not have been uploaded, or the specified entity tag might not have
// matched the part's entity tag.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidPartOrder
+ //
// - Description: The list of parts was not in ascending order. Parts list must
// be specified in order by part number.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidPayer
+ //
// - Description: All access to this object has been disabled. Please contact
// Amazon Web Services Support for further assistance.
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidPolicyDocument
+ //
// - Description: The content of the form does not meet the conditions specified
// in the policy document.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidRange
+ //
// - Description: The requested range cannot be satisfied.
+ //
// - HTTP Status Code: 416 Requested Range Not Satisfiable
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidRequest
+ //
// - Description: Please use AWS4-HMAC-SHA256 .
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - Code: N/A
+ //
// - Code: InvalidRequest
+ //
// - Description: SOAP requests must be made over an HTTPS connection.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidRequest
+ //
// - Description: Amazon S3 Transfer Acceleration is not supported for buckets
// with non-DNS compliant names.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - Code: N/A
+ //
// - Code: InvalidRequest
+ //
// - Description: Amazon S3 Transfer Acceleration is not supported for buckets
// with periods (.) in their names.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - Code: N/A
+ //
// - Code: InvalidRequest
+ //
// - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual
// style requests.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - Code: N/A
+ //
// - Code: InvalidRequest
- // - Description: Amazon S3 Transfer Accelerate is not configured on this
- // bucket.
+ //
+ // - Description: Amazon S3 Transfer Accelerate is not configured on this bucket.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - Code: N/A
+ //
// - Code: InvalidRequest
+ //
// - Description: Amazon S3 Transfer Accelerate is disabled on this bucket.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - Code: N/A
+ //
// - Code: InvalidRequest
+ //
// - Description: Amazon S3 Transfer Acceleration is not supported on this
// bucket. Contact Amazon Web Services Support for more information.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - Code: N/A
+ //
// - Code: InvalidRequest
+ //
// - Description: Amazon S3 Transfer Acceleration cannot be enabled on this
// bucket. Contact Amazon Web Services Support for more information.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - Code: N/A
+ //
// - Code: InvalidSecurity
+ //
// - Description: The provided security credentials are not valid.
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidSOAPRequest
+ //
// - Description: The SOAP request body is invalid.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidStorageClass
+ //
// - Description: The storage class you specified is not valid.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidTargetBucketForLogging
+ //
// - Description: The target bucket for logging does not exist, is not owned by
// you, or does not have the appropriate grants for the log-delivery group.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidToken
+ //
// - Description: The provided token is malformed or otherwise invalid.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: InvalidURI
+ //
// - Description: Couldn't parse the specified URI.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: KeyTooLongError
+ //
// - Description: Your key is too long.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MalformedACLError
+ //
// - Description: The XML you provided was not well-formed or did not validate
// against our published schema.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MalformedPOSTRequest
+ //
// - Description: The body of your POST request is not well-formed
// multipart/form-data.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MalformedXML
+ //
// - Description: This happens when the user sends malformed XML (XML that
// doesn't conform to the published XSD) for the configuration. The error message
// is, "The XML you provided was not well-formed or did not validate against our
// published schema."
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MaxMessageLengthExceeded
+ //
// - Description: Your request was too big.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MaxPostPreDataLengthExceededError
+ //
// - Description: Your POST request fields preceding the upload file were too
// large.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MetadataTooLarge
- // - Description: Your metadata headers exceed the maximum allowed metadata
- // size.
+ //
+ // - Description: Your metadata headers exceed the maximum allowed metadata size.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MethodNotAllowed
+ //
// - Description: The specified method is not allowed against this resource.
+ //
// - HTTP Status Code: 405 Method Not Allowed
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MissingAttachment
+ //
// - Description: A SOAP attachment was expected, but none were found.
+ //
// - HTTP Status Code: N/A
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MissingContentLength
+ //
// - Description: You must provide the Content-Length HTTP header.
+ //
// - HTTP Status Code: 411 Length Required
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MissingRequestBodyError
+ //
// - Description: This happens when the user sends an empty XML document as a
// request. The error message is, "Request body is empty."
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MissingSecurityElement
+ //
// - Description: The SOAP 1.1 request is missing a security element.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: MissingSecurityHeader
+ //
// - Description: Your request is missing a required header.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: NoLoggingStatusForKey
+ //
// - Description: There is no such thing as a logging status subresource for a
// key.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: NoSuchBucket
+ //
// - Description: The specified bucket does not exist.
+ //
// - HTTP Status Code: 404 Not Found
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: NoSuchBucketPolicy
+ //
// - Description: The specified bucket does not have a bucket policy.
+ //
// - HTTP Status Code: 404 Not Found
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: NoSuchKey
+ //
// - Description: The specified key does not exist.
+ //
// - HTTP Status Code: 404 Not Found
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: NoSuchLifecycleConfiguration
+ //
// - Description: The lifecycle configuration does not exist.
+ //
// - HTTP Status Code: 404 Not Found
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: NoSuchUpload
+ //
// - Description: The specified multipart upload does not exist. The upload ID
// might be invalid, or the multipart upload might have been aborted or completed.
+ //
// - HTTP Status Code: 404 Not Found
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: NoSuchVersion
+ //
// - Description: Indicates that the version ID specified in the request does
// not match an existing version.
+ //
// - HTTP Status Code: 404 Not Found
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: NotImplemented
+ //
// - Description: A header you provided implies functionality that is not
// implemented.
+ //
// - HTTP Status Code: 501 Not Implemented
+ //
// - SOAP Fault Code Prefix: Server
+ //
// - Code: NotSignedUp
+ //
// - Description: Your account is not signed up for the Amazon S3 service. You
// must sign up before you can use Amazon S3. You can sign up at the following URL:
- // Amazon S3 (http://aws.amazon.com/s3)
+ // [Amazon S3]
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: OperationAborted
+ //
// - Description: A conflicting conditional action is currently in progress
// against this resource. Try again.
+ //
// - HTTP Status Code: 409 Conflict
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: PermanentRedirect
+ //
// - Description: The bucket you are attempting to access must be addressed
// using the specified endpoint. Send all future requests to this endpoint.
+ //
// - HTTP Status Code: 301 Moved Permanently
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: PreconditionFailed
+ //
// - Description: At least one of the preconditions you specified did not hold.
+ //
// - HTTP Status Code: 412 Precondition Failed
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: Redirect
+ //
// - Description: Temporary redirect.
+ //
// - HTTP Status Code: 307 Moved Temporarily
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: RestoreAlreadyInProgress
+ //
// - Description: Object restore is already in progress.
+ //
// - HTTP Status Code: 409 Conflict
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: RequestIsNotMultiPartContent
+ //
// - Description: Bucket POST must be of the enclosure-type multipart/form-data.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: RequestTimeout
+ //
// - Description: Your socket connection to the server was not read from or
// written to within the timeout period.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: RequestTimeTooSkewed
+ //
// - Description: The difference between the request time and the server's time
// is too large.
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: RequestTorrentOfBucketError
+ //
// - Description: Requesting the torrent file of a bucket is not permitted.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: SignatureDoesNotMatch
+ //
// - Description: The request signature we calculated does not match the
// signature you provided. Check your Amazon Web Services secret access key and
- // signing method. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
- // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html)
- // for details.
+ // signing method. For more information, see [REST Authentication] and [SOAP Authentication] for details.
+ //
// - HTTP Status Code: 403 Forbidden
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: ServiceUnavailable
+ //
// - Description: Service is unable to handle request.
+ //
// - HTTP Status Code: 503 Service Unavailable
+ //
// - SOAP Fault Code Prefix: Server
+ //
// - Code: SlowDown
+ //
// - Description: Reduce your request rate.
+ //
// - HTTP Status Code: 503 Slow Down
+ //
// - SOAP Fault Code Prefix: Server
+ //
// - Code: TemporaryRedirect
+ //
// - Description: You are being redirected to the bucket while DNS updates.
+ //
// - HTTP Status Code: 307 Moved Temporarily
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: TokenRefreshRequired
+ //
// - Description: The provided token must be refreshed.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: TooManyBuckets
+ //
// - Description: You have attempted to create more buckets than allowed.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: UnexpectedContent
+ //
// - Description: This request does not support content.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: UnresolvableGrantByEmailAddress
+ //
// - Description: The email address you provided does not match any account on
// record.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
// - Code: UserKeyMustBeSpecified
+ //
// - Description: The bucket POST must contain the specified field name. If it
// is specified, check the order of the fields.
+ //
// - HTTP Status Code: 400 Bad Request
+ //
// - SOAP Fault Code Prefix: Client
+ //
+ // [How to Select a Region for Your Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+ // [Error responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+ // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+ // [Amazon S3]: http://aws.amazon.com/s3
+ // [SOAP Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html
Code *string
// The error key.
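
As a reading aid for the error-code table above: with aws-sdk-go-v2, request-level failures surface as smithy.APIError values whose ErrorCode() matches the Code column, while per-object failures from a batch delete come back as the types.Error values documented here. A minimal, hypothetical sketch (bucket and key names are made up, not part of this change):

```go
package example

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go"
)

// deleteLogs illustrates how the error codes above surface: request-level
// failures as smithy.APIError, per-object failures as types.Error entries.
func deleteLogs(ctx context.Context, client *s3.Client) {
	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{{Key: aws.String("logs/app.log")}},
		},
	})
	if err != nil {
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) {
			// ErrorCode() returns values from the table above, e.g. NoSuchBucket.
			log.Fatalf("request failed: %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
		}
		log.Fatal(err)
	}
	// Per-object failures are reported as types.Error values.
	for _, e := range out.Errors {
		log.Printf("could not delete %s: %s (%s)",
			aws.ToString(e.Key), aws.ToString(e.Code), aws.ToString(e.Message))
	}
}
```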
@@ -1240,8 +1670,9 @@ type Error struct {
// error message.
Message *string
- // The version ID of the error. This functionality is not supported for directory
- // buckets.
+ // The version ID of the error.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
noSmithyDocumentSerde
@@ -1250,11 +1681,12 @@ type Error struct {
// The error information.
type ErrorDocument struct {
- // The object key name to use when a 4XX class error occurs. Replacement must be
- // made for object keys containing special characters (such as carriage returns)
- // when using XML requests. For more information, see XML related object key
- // constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
- // .
+ // The object key name to use when a 4XX class error occurs.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
//
// This member is required.
Key *string
@@ -1268,8 +1700,9 @@ type EventBridgeConfiguration struct {
}
// Optional configuration to replicate existing source bucket objects. For more
-// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)
-// in the Amazon S3 User Guide.
+// information, see [Replicating Existing Objects] in the Amazon S3 User Guide.
+//
+// [Replicating Existing Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication
type ExistingObjectReplication struct {
// Specifies whether Amazon S3 replicates existing source bucket objects.
@@ -1293,9 +1726,10 @@ type FilterRule struct {
// The object key name prefix or suffix identifying one or more objects to which
// the filtering rule applies. The maximum length is 1,024 characters. Overlapping
- // prefixes and suffixes are not supported. For more information, see Configuring
- // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
- // in the Amazon S3 User Guide.
+ // prefixes and suffixes are not supported. For more information, see [Configuring Event Notifications] in the
+ // Amazon S3 User Guide.
+ //
+ // [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
Name FilterRuleName
// The value that the filter searches for in object key names.
@@ -1325,10 +1759,12 @@ type GetObjectAttributesParts struct {
// A container for elements related to a particular part. A response can contain
// zero or more Parts elements.
+ //
// - General purpose buckets - For GetObjectAttributes , if a additional checksum
// (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1
// , or x-amz-checksum-sha256 ) isn't applied to the object specified in the
// request, the response doesn't return Part .
+ //
+ // - Directory buckets - For GetObjectAttributes , no matter whether an additional
// checksum is applied to the object specified in the request, the response returns
// Part .
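
For orientation, this is roughly how the Parts behaviour described above is exercised from the client side; the bucket and key are placeholders and the snippet assumes the standard aws-sdk-go-v2 S3 client, not anything introduced by this change:

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// listPartChecksums requests the ObjectParts attribute; whether Parts is
// populated follows the general purpose vs. directory bucket rules above.
func listPartChecksums(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String("example-bucket"),   // hypothetical
		Key:    aws.String("videos/movie.mp4"), // hypothetical
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesChecksum,
			types.ObjectAttributesObjectParts,
		},
	})
	if err != nil {
		return err
	}
	if out.ObjectParts != nil {
		for _, p := range out.ObjectParts.Parts {
			fmt.Printf("part %d: %d bytes, sha256=%s\n",
				aws.ToInt32(p.PartNumber), aws.ToInt64(p.Size), aws.ToString(p.ChecksumSHA256))
		}
	}
	return nil
}
```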
@@ -1374,19 +1810,31 @@ type Grantee struct {
// Screen name of the grantee.
DisplayName *string
- // Email address of the grantee. Using email addresses to specify a grantee is
- // only supported in the following Amazon Web Services Regions:
+ // Email address of the grantee.
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
// - US East (N. Virginia)
+ //
// - US West (N. California)
+ //
// - US West (Oregon)
+ //
// - Asia Pacific (Singapore)
+ //
// - Asia Pacific (Sydney)
+ //
// - Asia Pacific (Tokyo)
+ //
// - Europe (Ireland)
+ //
// - South America (São Paulo)
- // For a list of all the Amazon S3 supported Regions and endpoints, see Regions
- // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
- // in the Amazon Web Services General Reference.
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+ // Amazon Web Services General Reference.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
EmailAddress *string
// The canonical user ID of the grantee.
@@ -1405,10 +1853,12 @@ type IndexDocument struct {
// endpoint (for example,if the suffix is index.html and you make a request to
// samplebucket/images/ the data that is returned will be for the object with the
// key name images/index.html) The suffix must not be empty and must not include a
- // slash character. Replacement must be made for object keys containing special
- // characters (such as carriage returns) when using XML requests. For more
- // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
- // .
+ // slash character.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
//
// This member is required.
Suffix *string
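
Taken together with the ErrorDocument type earlier in this hunk set, these two members are typically wired up through PutBucketWebsite. A small, hypothetical sketch (bucket and object keys are placeholders):

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableWebsite configures static website hosting with an index suffix and a
// custom 4XX error page, using the IndexDocument and ErrorDocument types above.
func enableWebsite(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketWebsite(ctx, &s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-website-bucket"), // hypothetical
		WebsiteConfiguration: &types.WebsiteConfiguration{
			IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &types.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	return err
}
```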
@@ -1419,12 +1869,14 @@ type IndexDocument struct {
// Container element that identifies who initiated the multipart upload.
type Initiator struct {
- // Name of the Principal. This functionality is not supported for directory
- // buckets.
+ // Name of the Principal.
+ //
+ // This functionality is not supported for directory buckets.
DisplayName *string
// If the principal is an Amazon Web Services account, it provides the Canonical
// User ID. If the principal is an IAM User, it provides a user ARN value.
+ //
// Directory buckets - If the principal is an Amazon Web Services account, it
// provides the Amazon Web Services account ID. If the principal is an IAM User, it
// provides a user ARN value.
@@ -1467,10 +1919,11 @@ type IntelligentTieringAndOperator struct {
noSmithyDocumentSerde
}
-// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For
-// information about the S3 Intelligent-Tiering storage class, see Storage class
-// for automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)
-// .
+// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket.
+//
+// For information about the S3 Intelligent-Tiering storage class, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
type IntelligentTieringConfiguration struct {
// The ID used to identify the S3 Intelligent-Tiering configuration.
@@ -1505,10 +1958,12 @@ type IntelligentTieringFilter struct {
And *IntelligentTieringAndOperator
// An object key name prefix that identifies the subset of objects to which the
- // rule applies. Replacement must be made for object keys containing special
- // characters (such as carriage returns) when using XML requests. For more
- // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
- // .
+ // rule applies.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
Prefix *string
// A container of a key value name pair.
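
As a usage sketch for the configuration and filter types above (the ID, bucket name, and prefix are made up, and the tiering threshold is only illustrative):

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// archiveDocuments applies an S3 Intelligent-Tiering configuration that moves
// objects under documents/ to the Archive Access tier after 90 days.
func archiveDocuments(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketIntelligentTieringConfiguration(ctx,
		&s3.PutBucketIntelligentTieringConfigurationInput{
			Bucket: aws.String("example-bucket"), // hypothetical
			Id:     aws.String("archive-docs"),
			IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{
				Id:     aws.String("archive-docs"),
				Status: types.IntelligentTieringStatusEnabled,
				Filter: &types.IntelligentTieringFilter{Prefix: aws.String("documents/")},
				Tierings: []types.Tiering{
					{AccessTier: types.IntelligentTieringAccessTierArchiveAccess, Days: aws.Int32(90)},
				},
			},
		})
	return err
}
```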
@@ -1518,8 +1973,9 @@ type IntelligentTieringFilter struct {
}
// Specifies the inventory configuration for an Amazon S3 bucket. For more
-// information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html)
-// in the Amazon S3 API Reference.
+// information, see [GET Bucket inventory] in the Amazon S3 API Reference.
+//
+// [GET Bucket inventory]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html
type InventoryConfiguration struct {
// Contains information about where to publish the inventory results.
@@ -1614,9 +2070,10 @@ type InventoryS3BucketDestination struct {
Format InventoryFormat
// The account ID that owns the destination S3 bucket. If no account ID is
- // provided, the owner is not validated before exporting data. Although this value
- // is optional, we strongly recommend that you set it to help prevent problems if
- // the destination bucket ownership changes.
+ // provided, the owner is not validated before exporting data.
+ //
+ // Although this value is optional, we strongly recommend that you set it to help
+ // prevent problems if the destination bucket ownership changes.
AccountId *string
// Contains the type of server-side encryption used to encrypt the inventory
@@ -1663,8 +2120,9 @@ type JSONOutput struct {
type LambdaFunctionConfiguration struct {
// The Amazon S3 bucket event for which to invoke the Lambda function. For more
- // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
- // in the Amazon S3 User Guide.
+ // information, see [Supported Event Types] in the Amazon S3 User Guide.
+ //
+ // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
//
// This member is required.
Events []Event
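
For reference, the event and filter types in these hunks are usually assembled like this; the Lambda ARN, bucket name, and key filters below are placeholders, not values from this change:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// notifyOnJpegUpload invokes a Lambda function for PUTs of .jpg objects under
// images/, using the key-name filtering rules described above.
func notifyOnJpegUpload(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketNotificationConfiguration(ctx,
		&s3.PutBucketNotificationConfigurationInput{
			Bucket: aws.String("example-bucket"), // hypothetical
			NotificationConfiguration: &types.NotificationConfiguration{
				LambdaFunctionConfigurations: []types.LambdaFunctionConfiguration{{
					// Hypothetical function ARN.
					LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:111122223333:function:process-images"),
					Events:            []types.Event{types.EventS3ObjectCreatedPut},
					Filter: &types.NotificationConfigurationFilter{
						Key: &types.S3KeyFilter{
							FilterRules: []types.FilterRule{
								{Name: types.FilterRuleNamePrefix, Value: aws.String("images/")},
								{Name: types.FilterRuleNameSuffix, Value: aws.String(".jpg")},
							},
						},
					},
				}},
			},
		})
	return err
}
```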
@@ -1676,8 +2134,9 @@ type LambdaFunctionConfiguration struct {
LambdaFunctionArn *string
// Specifies object key name filtering rules. For information about key name
- // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html)
- // in the Amazon S3 User Guide.
+ // filtering, see [Configuring event notifications using object key name filtering] in the Amazon S3 User Guide.
+ //
+ // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html
Filter *NotificationConfigurationFilter
// An optional unique identifier for configurations in a notification
@@ -1687,9 +2146,11 @@ type LambdaFunctionConfiguration struct {
noSmithyDocumentSerde
}
-// Container for the expiration for the lifecycle of the object. For more
-// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
-// in the Amazon S3 User Guide.
+// Container for the expiration for the lifecycle of the object.
+//
+// For more information, see [Managing your storage lifecycle] in the Amazon S3 User Guide.
+//
+// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html
type LifecycleExpiration struct {
// Indicates at what date the object is to be moved or deleted. The date value
@@ -1709,9 +2170,11 @@ type LifecycleExpiration struct {
noSmithyDocumentSerde
}
-// A lifecycle rule for individual objects in an Amazon S3 bucket. For more
-// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
-// in the Amazon S3 User Guide.
+// A lifecycle rule for individual objects in an Amazon S3 bucket.
+//
+// For more information, see [Managing your storage lifecycle] in the Amazon S3 User Guide.
+//
+// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html
type LifecycleRule struct {
// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is
@@ -1722,9 +2185,9 @@ type LifecycleRule struct {
// Specifies the days since the initiation of an incomplete multipart upload that
// Amazon S3 will wait before permanently removing all parts of the upload. For
- // more information, see Aborting Incomplete Multipart Uploads Using a Bucket
- // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
- // in the Amazon S3 User Guide.
+ // more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration] in the Amazon S3 User Guide.
+ //
+ // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload
// Specifies the expiration for the lifecycle of the object in the form of date,
@@ -1746,7 +2209,7 @@ type LifecycleRule struct {
// the object's lifetime.
NoncurrentVersionExpiration *NoncurrentVersionExpiration
- // Specifies the transition rule for the lifecycle rule that describes when
+ // Specifies the transition rule for the lifecycle rule that describes when
// noncurrent objects transition to a specific storage class. If your bucket is
// versioning-enabled (or versioning is suspended), you can set this action to
// request that Amazon S3 transition noncurrent object versions to a specific
@@ -1754,10 +2217,12 @@ type LifecycleRule struct {
NoncurrentVersionTransitions []NoncurrentVersionTransition
// Prefix identifying one or more objects to which the rule applies. This is no
- // longer used; use Filter instead. Replacement must be made for object keys
- // containing special characters (such as carriage returns) when using XML
- // requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
- // .
+ // longer used; use Filter instead.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
//
// Deprecated: This member has been deprecated.
Prefix *string
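
A small sketch of how the expiration, rule, and filter types in these hunks fit together; the rule ID, bucket, prefix, and day counts are illustrative only:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// expireOldLogs expires objects under logs/ after 30 days and aborts
// incomplete multipart uploads after 7 days, using the lifecycle types above.
func expireOldLogs(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLifecycleConfiguration(ctx,
		&s3.PutBucketLifecycleConfigurationInput{
			Bucket: aws.String("example-bucket"), // hypothetical
			LifecycleConfiguration: &types.BucketLifecycleConfiguration{
				Rules: []types.LifecycleRule{{
					ID:         aws.String("expire-old-logs"),
					Status:     types.ExpirationStatusEnabled,
					Filter:     &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"},
					Expiration: &types.LifecycleExpiration{Days: aws.Int32(30)},
					AbortIncompleteMultipartUpload: &types.AbortIncompleteMultipartUpload{
						DaysAfterInitiation: aws.Int32(7),
					},
				}},
			},
		})
	return err
}
```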
@@ -1834,11 +2299,12 @@ type LifecycleRuleFilterMemberObjectSizeLessThan struct {
func (*LifecycleRuleFilterMemberObjectSizeLessThan) isLifecycleRuleFilter() {}
-// Prefix identifying one or more objects to which the rule applies. Replacement
-// must be made for object keys containing special characters (such as carriage
-// returns) when using XML requests. For more information, see XML related object
-// key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
-// .
+// Prefix identifying one or more objects to which the rule applies.
+//
+// Replacement must be made for object keys containing special characters (such as
+// carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+//
+// [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
type LifecycleRuleFilterMemberPrefix struct {
Value string
@@ -1856,16 +2322,21 @@ type LifecycleRuleFilterMemberTag struct {
func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {}
-// Specifies the location where the bucket will be created. For directory buckets,
-// the location type is Availability Zone. For more information about directory
-// buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
-// in the Amazon S3 User Guide. This functionality is only supported by directory
-// buckets.
+// Specifies the location where the bucket will be created.
+//
+// For directory buckets, the location type is Availability Zone. For more
+// information about directory buckets, see [Directory buckets] in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+//
+// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
type LocationInfo struct {
- // The name of the location where the bucket will be created. For directory
- // buckets, the name of the location is the AZ ID of the Availability Zone where
- // the bucket will be created. An example AZ ID value is usw2-az1 .
+ // The name of the location where the bucket will be created.
+ //
+ // For directory buckets, the name of the location is the AZ ID of the
+ // Availability Zone where the bucket will be created. An example AZ ID value is
+ // usw2-az1 .
Name *string
// The type of location where the bucket will be created.
@@ -1875,8 +2346,10 @@ type LocationInfo struct {
}
// Describes where logs are stored and the prefix that Amazon S3 assigns to all
-// log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
-// in the Amazon S3 API Reference.
+// log object keys for a bucket. For more information, see [PUT Bucket logging] in the Amazon S3 API
+// Reference.
+//
+// [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html
type LoggingEnabled struct {
// Specifies the bucket where you want Amazon S3 to store server access logs. You
@@ -1896,10 +2369,12 @@ type LoggingEnabled struct {
// This member is required.
TargetPrefix *string
- // Container for granting information. Buckets that use the bucket owner enforced
- // setting for Object Ownership don't support target grants. For more information,
- // see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)
- // in the Amazon S3 User Guide.
+ // Container for granting information.
+ //
+ // Buckets that use the bucket owner enforced setting for Object Ownership don't
+ // support target grants. For more information, see [Permissions for server access log delivery] in the Amazon S3 User Guide.
+ //
+ // [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
TargetGrants []TargetGrant
// Amazon S3 key format for log objects.
@@ -1920,16 +2395,17 @@ type MetadataEntry struct {
noSmithyDocumentSerde
}
-// A container specifying replication metrics-related settings enabling
+// A container specifying replication metrics-related settings enabling
+//
// replication metrics and events.
type Metrics struct {
- // Specifies whether the replication metrics are enabled.
+ // Specifies whether the replication metrics are enabled.
//
// This member is required.
Status MetricsStatus
- // A container specifying the time threshold for emitting the
+ // A container specifying the time threshold for emitting the
// s3:Replication:OperationMissedThreshold event.
EventThreshold *ReplicationTimeValue
@@ -1957,8 +2433,9 @@ type MetricsAndOperator struct {
// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an
// existing metrics configuration, note that this is a full replacement of the
// existing metrics configuration. If you don't include the elements you want to
-// keep, they are erased. For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html)
-// .
+// keep, they are erased. For more information, see [PutBucketMetricsConfiguration].
+//
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html
type MetricsConfiguration struct {
// The ID used to identify the metrics configuration. The ID has a 64 character
@@ -1978,8 +2455,7 @@ type MetricsConfiguration struct {
// Specifies a metrics configuration filter. The metrics configuration only
// includes objects that meet the filter's criteria. A filter must be a prefix, an
// object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more
-// information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
-// .
+// information, see [PutBucketMetricsConfiguration].
//
// The following types satisfy this interface:
//
@@ -1987,6 +2463,8 @@ type MetricsConfiguration struct {
// MetricsFilterMemberAnd
// MetricsFilterMemberPrefix
// MetricsFilterMemberTag
+//
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html
type MetricsFilter interface {
isMetricsFilter()
}
@@ -2045,13 +2523,15 @@ type MultipartUpload struct {
Key *string
// Specifies the owner of the object that is part of the multipart upload.
- // Directory buckets - The bucket owner is returned as the object owner for all the
- // objects.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner for all
+ // the objects.
Owner *Owner
- // The class of storage used to store the object. Directory buckets - Only the S3
- // Express One Zone storage class is supported by directory buckets to store
- // objects.
+ // The class of storage used to store the object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
StorageClass StorageClass
// Upload ID that identifies the multipart upload.
@@ -2070,15 +2550,17 @@ type NoncurrentVersionExpiration struct {
// Specifies how many newer noncurrent versions must exist before Amazon S3 can
// perform the associated action on a given version. If there are this many more
// recent noncurrent versions, Amazon S3 will take the associated action. For more
- // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html)
- // in the Amazon S3 User Guide.
+ // information about noncurrent versions, see [Lifecycle configuration elements] in the Amazon S3 User Guide.
+ //
+ // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html
NewerNoncurrentVersions *int32
// Specifies the number of days an object is noncurrent before Amazon S3 can
// perform the associated action. The value must be a non-zero positive integer.
- // For information about the noncurrent days calculations, see How Amazon S3
- // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
- // in the Amazon S3 User Guide.
+ // For information about the noncurrent days calculations, see [How Amazon S3 Calculates When an Object Became Noncurrent] in the Amazon S3
+ // User Guide.
+ //
+ // [How Amazon S3 Calculates When an Object Became Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations
NoncurrentDays *int32
noSmithyDocumentSerde
@@ -2096,15 +2578,16 @@ type NoncurrentVersionTransition struct {
// Specifies how many newer noncurrent versions must exist before Amazon S3 can
// perform the associated action on a given version. If there are this many more
// recent noncurrent versions, Amazon S3 will take the associated action. For more
- // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html)
- // in the Amazon S3 User Guide.
+ // information about noncurrent versions, see [Lifecycle configuration elements] in the Amazon S3 User Guide.
+ //
+ // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html
NewerNoncurrentVersions *int32
// Specifies the number of days an object is noncurrent before Amazon S3 can
// perform the associated action. For information about the noncurrent days
- // calculations, see How Amazon S3 Calculates How Long an Object Has Been
- // Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
- // in the Amazon S3 User Guide.
+ // calculations, see [How Amazon S3 Calculates How Long an Object Has Been Noncurrent] in the Amazon S3 User Guide.
+ //
+ // [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations
NoncurrentDays *int32
// The class of storage used to store the object.
@@ -2136,8 +2619,9 @@ type NotificationConfiguration struct {
}
// Specifies object key name filtering rules. For information about key name
-// filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html)
-// in the Amazon S3 User Guide.
+// filtering, see [Configuring event notifications using object key name filtering] in the Amazon S3 User Guide.
+//
+// [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html
type NotificationConfigurationFilter struct {
// A container for object key name prefix and suffix filtering rules.
@@ -2156,17 +2640,21 @@ type Object struct {
// contents of an object, not its metadata. The ETag may or may not be an MD5
// digest of the object data. Whether or not it is depends on how the object was
// created and how it is encrypted as described below:
+ //
// - Objects created by the PUT Object, POST Object, or Copy operation, or
// through the Amazon Web Services Management Console, and are encrypted by SSE-S3
// or plaintext, have ETags that are an MD5 digest of their object data.
+ //
// - Objects created by the PUT Object, POST Object, or Copy operation, or
// through the Amazon Web Services Management Console, and are encrypted by SSE-C
// or SSE-KMS, have ETags that are not an MD5 digest of their object data.
+ //
// - If an object is created by either the Multipart Upload or Part Copy
// operation, the ETag is not an MD5 digest, regardless of the method of
// encryption. If an object is larger than 16 MB, the Amazon Web Services
// Management Console will upload or copy that object as a Multipart Upload, and
// therefore the ETag will not be an MD5 digest.
+ //
// Directory buckets - MD5 is not supported by directory buckets.
ETag *string
@@ -2177,25 +2665,29 @@ type Object struct {
// Creation date of the object.
LastModified *time.Time
- // The owner of the object Directory buckets - The bucket owner is returned as the
- // object owner.
+ // The owner of the object.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner.
Owner *Owner
// Specifies the restoration status of an object. Objects in certain storage
// classes must be restored before they can be retrieved. For more information
- // about these storage classes and how to work with archived objects, see Working
- // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets. Only the S3 Express One Zone storage class is supported by directory
- // buckets to store objects.
+ // about these storage classes and how to work with archived objects, see [Working with archived objects] in the
+ // Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
+ //
+ // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html
RestoreStatus *RestoreStatus
// Size in bytes of the object
Size *int64
- // The class of storage used to store the object. Directory buckets - Only the S3
- // Express One Zone storage class is supported by directory buckets to store
- // objects.
+ // The class of storage used to store the object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
StorageClass ObjectStorageClass
noSmithyDocumentSerde
@@ -2204,16 +2696,19 @@ type Object struct {
// Object Identifier is unique value to identify objects.
type ObjectIdentifier struct {
- // Key name of the object. Replacement must be made for object keys containing
- // special characters (such as carriage returns) when using XML requests. For more
- // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
- // .
+ // Key name of the object.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
//
// This member is required.
Key *string
- // Version ID for the specific version of the object to delete. This functionality
- // is not supported for directory buckets.
+ // Version ID for the specific version of the object to delete.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string
noSmithyDocumentSerde
@@ -2273,9 +2768,10 @@ type ObjectPart struct {
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
@@ -2283,8 +2779,10 @@ type ObjectPart struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
@@ -2292,8 +2790,10 @@ type ObjectPart struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
@@ -2301,8 +2801,10 @@ type ObjectPart struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA256 *string
// The part number identifying the part. This value is a positive integer between
@@ -2339,9 +2841,10 @@ type ObjectVersion struct {
// Specifies the restoration status of an object. Objects in certain storage
// classes must be restored before they can be retrieved. For more information
- // about these storage classes and how to work with archived objects, see Working
- // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html)
- // in the Amazon S3 User Guide.
+ // about these storage classes and how to work with archived objects, see [Working with archived objects] in the
+ // Amazon S3 User Guide.
+ //
+ // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html
RestoreStatus *RestoreStatus
// Size in bytes of the object.
@@ -2382,14 +2885,23 @@ type Owner struct {
// Container for the display name of the owner. This value is only supported in
// the following Amazon Web Services Regions:
+ //
// - US East (N. Virginia)
+ //
// - US West (N. California)
+ //
// - US West (Oregon)
+ //
// - Asia Pacific (Singapore)
+ //
// - Asia Pacific (Sydney)
+ //
// - Asia Pacific (Tokyo)
+ //
// - Europe (Ireland)
+ //
// - South America (São Paulo)
+ //
// This functionality is not supported for directory buckets.
DisplayName *string
@@ -2414,23 +2926,30 @@ type OwnershipControls struct {
type OwnershipControlsRule struct {
// The container element for object ownership for a bucket's ownership controls.
+ //
// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the
// bucket owner if the objects are uploaded with the bucket-owner-full-control
- // canned ACL. ObjectWriter - The uploading account will own the object if the
- // object is uploaded with the bucket-owner-full-control canned ACL.
+ // canned ACL.
+ //
+ // ObjectWriter - The uploading account will own the object if the object is
+ // uploaded with the bucket-owner-full-control canned ACL.
+ //
// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer
// affect permissions. The bucket owner automatically owns and has full control
// over every object in the bucket. The bucket only accepts PUT requests that don't
// specify an ACL or specify bucket owner full control ACLs (such as the predefined
// bucket-owner-full-control canned ACL or a custom ACL in XML format that grants
- // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced
- // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon
- // use cases where you must control access for each object individually. For more
- // information about S3 Object Ownership, see Controlling ownership of objects and
- // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets. Directory buckets use the bucket owner enforced setting for S3 Object
- // Ownership.
+ // the same permissions).
+ //
+ // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are
+ // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where
+ // you must control access for each object individually. For more information about
+ // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Directory buckets
+ // use the bucket owner enforced setting for S3 Object Ownership.
+ //
+ // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
//
// This member is required.
ObjectOwnership ObjectOwnership
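
To illustrate the rule described above, this is roughly how the enforced setting is applied through the client; the bucket name is a placeholder:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// disableACLs sets the BucketOwnerEnforced ownership rule, which disables ACLs
// and makes the bucket owner own every object, as described above.
func disableACLs(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		OwnershipControls: &types.OwnershipControls{
			Rules: []types.OwnershipControlsRule{
				{ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced},
			},
		},
	})
	return err
}
```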
@@ -2448,9 +2967,10 @@ type Part struct {
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
@@ -2458,8 +2978,10 @@ type Part struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
@@ -2467,15 +2989,18 @@ type Part struct {
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
- // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity]
// in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumSHA256 *string
// Entity tag returned when the part was uploaded.
@@ -2495,7 +3020,9 @@ type Part struct {
}
// Amazon S3 keys for log objects are partitioned in the following format:
-// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+//
+// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+//
// PartitionedPrefix defaults to EventTime delivery when server access logs are
// delivered.
type PartitionedPrefix struct {
@@ -2543,41 +3070,48 @@ type ProgressEvent struct {
// The PublicAccessBlock configuration that you want to apply to this Amazon S3
// bucket. You can enable the configuration options in any combination. For more
-// information about when Amazon S3 considers a bucket or object public, see The
-// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
-// in the Amazon S3 User Guide.
+// information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"] in
+// the Amazon S3 User Guide.
+//
+// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
type PublicAccessBlockConfiguration struct {
// Specifies whether Amazon S3 should block public access control lists (ACLs) for
// this bucket and objects in this bucket. Setting this element to TRUE causes the
// following behavior:
- // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is
- // public.
+ //
+ // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public.
+ //
// - PUT Object calls fail if the request includes a public ACL.
+ //
// - PUT Bucket calls fail if the request includes a public ACL.
+ //
// Enabling this setting doesn't affect existing policies or ACLs.
BlockPublicAcls *bool
// Specifies whether Amazon S3 should block public bucket policies for this
// bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT
- // Bucket policy if the specified bucket policy allows public access. Enabling this
- // setting doesn't affect existing bucket policies.
+ // Bucket policy if the specified bucket policy allows public access.
+ //
+ // Enabling this setting doesn't affect existing bucket policies.
BlockPublicPolicy *bool
// Specifies whether Amazon S3 should ignore public ACLs for this bucket and
// objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore
- // all public ACLs on this bucket and objects in this bucket. Enabling this setting
- // doesn't affect the persistence of any existing ACLs and doesn't prevent new
- // public ACLs from being set.
+ // all public ACLs on this bucket and objects in this bucket.
+ //
+ // Enabling this setting doesn't affect the persistence of any existing ACLs and
+ // doesn't prevent new public ACLs from being set.
IgnorePublicAcls *bool
// Specifies whether Amazon S3 should restrict public bucket policies for this
// bucket. Setting this element to TRUE restricts access to this bucket to only
// Amazon Web Service principals and authorized users within this account if the
- // bucket has a public policy. Enabling this setting doesn't affect previously
- // stored bucket policies, except that public and cross-account access within any
- // public bucket policy, including non-public delegation to specific accounts, is
- // blocked.
+ // bucket has a public policy.
+ //
+ // Enabling this setting doesn't affect previously stored bucket policies, except
+ // that public and cross-account access within any public bucket policy, including
+ // non-public delegation to specific accounts, is blocked.
RestrictPublicBuckets *bool
noSmithyDocumentSerde
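
The PublicAccessBlockConfiguration fields above map directly onto the PutPublicAccessBlock call in this SDK. A minimal sketch, assuming a placeholder bucket name and that all four protections should be enabled:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Block all four categories of public access for a hypothetical bucket.
	_, err = client.PutPublicAccessBlock(ctx, &s3.PutPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"),
		PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true), // reject PUT Bucket ACL / PUT Object ACL calls with public ACLs
			IgnorePublicAcls:      aws.Bool(true), // ignore existing public ACLs; does not delete them
			BlockPublicPolicy:     aws.Bool(true), // reject PUT Bucket policy calls that allow public access
			RestrictPublicBuckets: aws.Bool(true), // limit a public-policy bucket to AWS service principals and this account
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
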
@@ -2599,8 +3133,9 @@ type QueueConfiguration struct {
QueueArn *string
// Specifies object key name filtering rules. For information about key name
- // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html)
- // in the Amazon S3 User Guide.
+ // filtering, see [Configuring event notifications using object key name filtering] in the Amazon S3 User Guide.
+ //
+ // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html
Filter *NotificationConfigurationFilter
// An optional unique identifier for configurations in a notification
@@ -2639,18 +3174,22 @@ type Redirect struct {
// documents/ , you can set a condition block with KeyPrefixEquals set to docs/
// and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one
// of the siblings is present. Can be present only if ReplaceKeyWith is not
- // provided. Replacement must be made for object keys containing special characters
- // (such as carriage returns) when using XML requests. For more information, see
- // XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
- // .
+ // provided.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
ReplaceKeyPrefixWith *string
// The specific object key to use in the redirect request. For example, redirect
// request to error.html . Not required if one of the siblings is present. Can be
- // present only if ReplaceKeyPrefixWith is not provided. Replacement must be made
- // for object keys containing special characters (such as carriage returns) when
- // using XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
- // .
+ // present only if ReplaceKeyPrefixWith is not provided.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
ReplaceKeyWith *string
noSmithyDocumentSerde
@@ -2676,9 +3215,11 @@ type RedirectAllRequestsTo struct {
// Amazon S3 doesn't replicate replica modifications by default. In the latest
// version of replication configuration (when Filter is specified), you can
// specify this element and set the status to Enabled to replicate modifications
-// on replicas. If you don't specify the Filter element, Amazon S3 assumes that
-// the replication configuration is the earlier version, V1. In the earlier
-// version, this element is not allowed.
+// on replicas.
+//
+// If you don't specify the Filter element, Amazon S3 assumes that the replication
+// configuration is the earlier version, V1. In the earlier version, this element
+// is not allowed.
type ReplicaModifications struct {
// Specifies whether Amazon S3 replicates modifications on replicas.
@@ -2694,9 +3235,10 @@ type ReplicaModifications struct {
type ReplicationConfiguration struct {
// The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role
- // that Amazon S3 assumes when replicating objects. For more information, see How
- // to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html)
- // in the Amazon S3 User Guide.
+ // that Amazon S3 assumes when replicating objects. For more information, see [How to Set Up Replication] in
+ // the Amazon S3 User Guide.
+ //
+ // [How to Set Up Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html
//
// This member is required.
Role *string
@@ -2729,18 +3271,21 @@ type ReplicationRule struct {
// DeleteMarkerReplication element. If your Filter includes a Tag element, the
// DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does
// not support replicating delete markers for tag-based rules. For an example
- // configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config)
- // . For more information about delete marker replication, see Basic Rule
- // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html)
- // . If you are using an earlier version of the replication configuration, Amazon
- // S3 handles replication of delete markers differently. For more information, see
- // Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations)
- // .
+ // configuration, see [Basic Rule Configuration].
+ //
+ // For more information about delete marker replication, see [Basic Rule Configuration].
+ //
+ // If you are using an earlier version of the replication configuration, Amazon S3
+ // handles replication of delete markers differently. For more information, see [Backward Compatibility].
+ //
+ // [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html
+ // [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations
DeleteMarkerReplication *DeleteMarkerReplication
// Optional configuration to replicate existing source bucket objects. For more
- // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)
- // in the Amazon S3 User Guide.
+ // information, see [Replicating Existing Objects] in the Amazon S3 User Guide.
+ //
+ // [Replicating Existing Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication
ExistingObjectReplication *ExistingObjectReplication
// A filter that identifies the subset of objects to which the replication rule
@@ -2753,10 +3298,12 @@ type ReplicationRule struct {
// An object key name prefix that identifies the object or objects to which the
// rule applies. The maximum prefix length is 1,024 characters. To include all
- // objects in a bucket, specify an empty string. Replacement must be made for
- // object keys containing special characters (such as carriage returns) when using
- // XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
- // .
+ // objects in a bucket, specify an empty string.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
//
// Deprecated: This member has been deprecated.
Prefix *string
@@ -2766,8 +3313,10 @@ type ReplicationRule struct {
// according to all replication rules. However, if there are two or more rules with
// the same destination bucket, then objects will be replicated according to the
// rule with the highest priority. The higher the number, the higher the priority.
- // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
- // in the Amazon S3 User Guide.
+ //
+ // For more information, see [Replication] in the Amazon S3 User Guide.
+ //
+ // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
Priority *int32
// A container that describes additional filters for identifying the source
@@ -2782,9 +3331,13 @@ type ReplicationRule struct {
// A container for specifying rule filters. The filters determine the subset of
// objects to which the rule applies. This element is required only if you specify
-// more than one filter. For example:
+// more than one filter.
+//
+// For example:
+//
// - If you specify both a Prefix and a Tag filter, wrap these filters in an And
// tag.
+//
// - If you specify a filter based on multiple tags, wrap the Tag elements in an
// And tag.
type ReplicationRuleAndOperator struct {
@@ -2815,8 +3368,10 @@ type ReplicationRuleFilter interface {
// A container for specifying rule filters. The filters determine the subset of
// objects to which the rule applies. This element is required only if you specify
// more than one filter. For example:
+//
// - If you specify both a Prefix and a Tag filter, wrap these filters in an And
// tag.
+//
// - If you specify a filter based on multiple tags, wrap the Tag elements in an
// And tag.
type ReplicationRuleFilterMemberAnd struct {
@@ -2828,10 +3383,12 @@ type ReplicationRuleFilterMemberAnd struct {
func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {}
// An object key name prefix that identifies the subset of objects to which the
-// rule applies. Replacement must be made for object keys containing special
-// characters (such as carriage returns) when using XML requests. For more
-// information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
-// .
+// rule applies.
+//
+// Replacement must be made for object keys containing special characters (such as
+// carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+//
+// [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
type ReplicationRuleFilterMemberPrefix struct {
Value string
@@ -2840,8 +3397,9 @@ type ReplicationRuleFilterMemberPrefix struct {
func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {}
-// A container for specifying a tag key and value. The rule applies only to
-// objects that have the tag in their tag set.
+// A container for specifying a tag key and value.
+//
+// The rule applies only to objects that have the tag in their tag set.
type ReplicationRuleFilterMemberTag struct {
Value Tag
@@ -2850,19 +3408,20 @@ type ReplicationRuleFilterMemberTag struct {
func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {}
-// A container specifying S3 Replication Time Control (S3 RTC) related
+// A container specifying S3 Replication Time Control (S3 RTC) related
+//
// information, including whether S3 RTC is enabled and the time when all objects
// and operations on objects must be replicated. Must be specified together with a
// Metrics block.
type ReplicationTime struct {
- // Specifies whether the replication time is enabled.
+ // Specifies whether the replication time is enabled.
//
// This member is required.
Status ReplicationTimeStatus
- // A container specifying the time by which replication should be complete for all
- // objects and operations on objects.
+ // A container specifying the time by which replication should be complete for
+ // all objects and operations on objects.
//
// This member is required.
Time *ReplicationTimeValue
@@ -2870,11 +3429,14 @@ type ReplicationTime struct {
noSmithyDocumentSerde
}
-// A container specifying the time value for S3 Replication Time Control (S3 RTC)
+// A container specifying the time value for S3 Replication Time Control (S3 RTC)
+//
// and replication metrics EventThreshold .
type ReplicationTimeValue struct {
- // Contains an integer specifying time in minutes. Valid value: 15
+ // Contains an integer specifying time in minutes.
+ //
+ // Valid value: 15
Minutes *int32
noSmithyDocumentSerde
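
The replication types updated above (ReplicationConfiguration, ReplicationRule, the filter union, DeleteMarkerReplication, ReplicationTime, and Metrics) fit together roughly as in the following sketch. The bucket ARNs, IAM role ARN, rule ID, and tag are placeholders; per the documentation above, a Tag-based filter requires DeleteMarkerReplication to be Disabled, and ReplicationTime must be paired with a Metrics block.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableRTCReplication configures one rule whose filter combines a Prefix and
// a Tag (wrapped in an And block) and enables S3 Replication Time Control.
func enableRTCReplication(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
		Bucket: aws.String("example-source-bucket"),
		ReplicationConfiguration: &types.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::111122223333:role/example-replication-role"),
			Rules: []types.ReplicationRule{
				{
					ID:       aws.String("docs-rtc"),
					Status:   types.ReplicationRuleStatusEnabled,
					Priority: aws.Int32(1),
					Filter: &types.ReplicationRuleFilterMemberAnd{
						Value: types.ReplicationRuleAndOperator{
							Prefix: aws.String("docs/"),
							Tags:   []types.Tag{{Key: aws.String("replicate"), Value: aws.String("true")}},
						},
					},
					// Required to be Disabled because the filter contains a Tag.
					DeleteMarkerReplication: &types.DeleteMarkerReplication{
						Status: types.DeleteMarkerReplicationStatusDisabled,
					},
					Destination: &types.Destination{
						Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
						ReplicationTime: &types.ReplicationTime{
							Status: types.ReplicationTimeStatusEnabled,
							Time:   &types.ReplicationTimeValue{Minutes: aws.Int32(15)},
						},
						Metrics: &types.Metrics{
							Status:         types.MetricsStatusEnabled,
							EventThreshold: &types.ReplicationTimeValue{Minutes: aws.Int32(15)},
						},
					},
				},
			},
		},
	})
	return err
}
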
@@ -2905,8 +3467,10 @@ type RequestProgress struct {
type RestoreRequest struct {
// Lifetime of the active copy in days. Do not use with restores that specify
- // OutputLocation . The Days element is required for regular restores, and must not
- // be provided for select requests.
+ // OutputLocation .
+ //
+ // The Days element is required for regular restores, and must not be provided for
+ // select requests.
Days *int32
// The optional description for the job.
@@ -2933,34 +3497,43 @@ type RestoreRequest struct {
// Specifies the restoration status of an object. Objects in certain storage
// classes must be restored before they can be retrieved. For more information
-// about these storage classes and how to work with archived objects, see Working
-// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html)
-// in the Amazon S3 User Guide. This functionality is not supported for directory
-// buckets. Only the S3 Express One Zone storage class is supported by directory
-// buckets to store objects.
+// about these storage classes and how to work with archived objects, see [Working with archived objects] in the
+// Amazon S3 User Guide.
+//
+// This functionality is not supported for directory buckets. Only the S3 Express
+// One Zone storage class is supported by directory buckets to store objects.
+//
+// [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html
type RestoreStatus struct {
// Specifies whether the object is currently being restored. If the object
// restoration is in progress, the header returns the value TRUE . For example:
- // x-amz-optional-object-attributes: IsRestoreInProgress="true" If the object
- // restoration has completed, the header returns the value FALSE . For example:
- // x-amz-optional-object-attributes: IsRestoreInProgress="false",
- // RestoreExpiryDate="2012-12-21T00:00:00.000Z" If the object hasn't been restored,
- // there is no header response.
+ //
+ // x-amz-optional-object-attributes: IsRestoreInProgress="true"
+ //
+ // If the object restoration has completed, the header returns the value FALSE .
+ // For example:
+ //
+ // x-amz-optional-object-attributes: IsRestoreInProgress="false",
+ // RestoreExpiryDate="2012-12-21T00:00:00.000Z"
+ //
+ // If the object hasn't been restored, there is no header response.
IsRestoreInProgress *bool
// Indicates when the restored copy will expire. This value is populated only if
// the object has already been restored. For example:
- // x-amz-optional-object-attributes: IsRestoreInProgress="false",
- // RestoreExpiryDate="2012-12-21T00:00:00.000Z"
+ //
+ // x-amz-optional-object-attributes: IsRestoreInProgress="false",
+ // RestoreExpiryDate="2012-12-21T00:00:00.000Z"
RestoreExpiryDate *time.Time
noSmithyDocumentSerde
}
// Specifies the redirect behavior and when a redirect is applied. For more
-// information about routing rules, see Configuring advanced conditional redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects)
-// in the Amazon S3 User Guide.
+// information about routing rules, see [Configuring advanced conditional redirects] in the Amazon S3 User Guide.
+//
+// [Configuring advanced conditional redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects
type RoutingRule struct {
// Container for redirect information. You can redirect requests to another host,
@@ -3135,8 +3708,9 @@ type SelectParameters struct {
// at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key
// in your Amazon Web Services account the first time that you add an object
// encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for
-// SSE-KMS. For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html)
-// in the Amazon S3 API Reference.
+// SSE-KMS. For more information, see [PUT Bucket encryption] in the Amazon S3 API Reference.
+//
+// [PUT Bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html
type ServerSideEncryptionByDefault struct {
// Server-side encryption algorithm to use for the default encryption.
@@ -3146,19 +3720,30 @@ type ServerSideEncryptionByDefault struct {
// Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
// KMS key ID to use for the default encryption. This parameter is allowed if and
- // only if SSEAlgorithm is set to aws:kms or aws:kms:dsse . You can specify the key
- // ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.
+ // only if SSEAlgorithm is set to aws:kms or aws:kms:dsse .
+ //
+ // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the
+ // KMS key.
+ //
// - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+ //
// - Key ARN:
// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ //
// - Key Alias: alias/alias-name
+ //
// If you use a key ID, you can run into a LogDestination undeliverable error when
- // creating a VPC flow log. If you are using encryption with cross-account or
- // Amazon Web Services service operations you must use a fully qualified KMS key
- // ARN. For more information, see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy)
- // . Amazon S3 only supports symmetric encryption KMS keys. For more information,
- // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
- // in the Amazon Web Services Key Management Service Developer Guide.
+ // creating a VPC flow log.
+ //
+ // If you are using encryption with cross-account or Amazon Web Services service
+ // operations you must use a fully qualified KMS key ARN. For more information, see
+ // [Using encryption for cross-account operations].
+ //
+ // Amazon S3 only supports symmetric encryption KMS keys. For more information,
+ // see [Asymmetric keys in Amazon Web Services KMS] in the Amazon Web Services Key Management Service Developer Guide.
+ //
+ // [Using encryption for cross-account operations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy
+ // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
KMSMasterKeyID *string
noSmithyDocumentSerde
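
A hedged sketch of setting this default through PutBucketEncryption; the bucket name is a placeholder, the key ARN is the example ARN quoted above, and BucketKeyEnabled (documented on ServerSideEncryptionRule just below) is switched on as well.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// setDefaultSSEKMS makes SSE-KMS the bucket default and enables an S3 Bucket Key.
func setDefaultSSEKMS(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
						KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
					},
					BucketKeyEnabled: aws.Bool(true),
				},
			},
		},
	})
	return err
}
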
@@ -3187,17 +3772,20 @@ type ServerSideEncryptionRule struct {
// Specifies whether Amazon S3 should use an S3 Bucket Key with server-side
// encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects
// are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3
- // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more
- // information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
- // in the Amazon S3 User Guide.
+ // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.
+ //
+ // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide.
+ //
+ // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
BucketKeyEnabled *bool
noSmithyDocumentSerde
}
-// The established temporary security credentials of the session. Directory
-// buckets - These session credentials are only supported for the authentication
-// and authorization of Zonal endpoint APIs on directory buckets.
+// The established temporary security credentials of the session.
+//
+// Directory buckets - These session credentials are only supported for the
+// authentication and authorization of Zonal endpoint APIs on directory buckets.
type SessionCredentials struct {
// A unique identifier that's associated with a secret access key. The access key
@@ -3233,7 +3821,9 @@ type SessionCredentials struct {
}
// To use simple format for S3 keys for log objects, set SimplePrefix to an empty
-// object. [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+// object.
+//
+// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
type SimplePrefix struct {
noSmithyDocumentSerde
}
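
The SimplePrefix and PartitionedPrefix key formats above are selected through TargetObjectKeyFormat on the bucket's logging configuration. A sketch under the assumption of placeholder source and target bucket names; swapping PartitionedPrefix for an empty SimplePrefix struct yields the flat key format shown above.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enablePartitionedAccessLogs turns on server access logging and partitions
// log object keys by the event time of the logged request.
func enablePartitionedAccessLogs(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"),
		BucketLoggingStatus: &types.BucketLoggingStatus{
			LoggingEnabled: &types.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"),
				TargetPrefix: aws.String("logs/"),
				TargetObjectKeyFormat: &types.TargetObjectKeyFormat{
					PartitionedPrefix: &types.PartitionedPrefix{
						PartitionDateSource: types.PartitionDateSourceEventTime,
					},
				},
			},
		},
	})
	return err
}
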
@@ -3249,12 +3839,14 @@ type SourceSelectionCriteria struct {
// Amazon S3 doesn't replicate replica modifications by default. In the latest
// version of replication configuration (when Filter is specified), you can
// specify this element and set the status to Enabled to replicate modifications
- // on replicas. If you don't specify the Filter element, Amazon S3 assumes that
- // the replication configuration is the earlier version, V1. In the earlier
- // version, this element is not allowed
+ // on replicas.
+ //
+ // If you don't specify the Filter element, Amazon S3 assumes that the replication
+ // configuration is the earlier version, V1. In the earlier version, this element
+ // is not allowed
ReplicaModifications *ReplicaModifications
- // A container for filter information for the selection of Amazon S3 objects
+ // A container for filter information for the selection of Amazon S3 objects
// encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria
// in the replication configuration, this element is required.
SseKmsEncryptedObjects *SseKmsEncryptedObjects
@@ -3372,10 +3964,12 @@ type Tagging struct {
noSmithyDocumentSerde
}
-// Container for granting information. Buckets that use the bucket owner enforced
-// setting for Object Ownership don't support target grants. For more information,
-// see Permissions server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)
-// in the Amazon S3 User Guide.
+// Container for granting information.
+//
+// Buckets that use the bucket owner enforced setting for Object Ownership don't
+// support target grants. For more information, see [Permissions server access log delivery] in the Amazon S3 User Guide.
+//
+// [Permissions server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
type TargetGrant struct {
// Container for the person being granted permissions.
@@ -3406,9 +4000,10 @@ type TargetObjectKeyFormat struct {
// without additional operational overhead.
type Tiering struct {
- // S3 Intelligent-Tiering access tier. See Storage class for automatically
- // optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)
- // for a list of access tiers in the S3 Intelligent-Tiering storage class.
+ // S3 Intelligent-Tiering access tier. See [Storage class for automatically optimizing frequently and infrequently accessed objects] for a list of access tiers in the S3
+ // Intelligent-Tiering storage class.
+ //
+ // [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
//
// This member is required.
AccessTier IntelligentTieringAccessTier
@@ -3431,8 +4026,9 @@ type Tiering struct {
type TopicConfiguration struct {
// The Amazon S3 bucket event about which to send notifications. For more
- // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
- // in the Amazon S3 User Guide.
+ // information, see [Supported Event Types] in the Amazon S3 User Guide.
+ //
+ // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
//
// This member is required.
Events []Event
@@ -3444,8 +4040,9 @@ type TopicConfiguration struct {
TopicArn *string
// Specifies object key name filtering rules. For information about key name
- // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html)
- // in the Amazon S3 User Guide.
+ // filtering, see [Configuring event notifications using object key name filtering] in the Amazon S3 User Guide.
+ //
+ // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html
Filter *NotificationConfigurationFilter
// An optional unique identifier for configurations in a notification
@@ -3456,9 +4053,10 @@ type TopicConfiguration struct {
}
// Specifies when an object transitions to a specified storage class. For more
-// information about Amazon S3 lifecycle configuration rules, see Transitioning
-// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html)
-// in the Amazon S3 User Guide.
+// information about Amazon S3 lifecycle configuration rules, see [Transitioning Objects Using Amazon S3 Lifecycle] in the Amazon S3
+// User Guide.
+//
+// [Transitioning Objects Using Amazon S3 Lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html
type Transition struct {
// Indicates when objects are transitioned to the specified storage class. The
@@ -3476,8 +4074,9 @@ type Transition struct {
}
// Describes the versioning state of an Amazon S3 bucket. For more information,
-// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
-// in the Amazon S3 API Reference.
+// see [PUT Bucket versioning] in the Amazon S3 API Reference.
+//
+// [PUT Bucket versioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html
type VersioningConfiguration struct {
// Specifies whether MFA delete is enabled in the bucket versioning configuration.
@@ -3500,8 +4099,9 @@ type WebsiteConfiguration struct {
// The name of the index document for the website.
IndexDocument *IndexDocument
- // The redirect behavior for every request to this bucket's website endpoint. If
- // you specify this property, you can't specify any other property.
+ // The redirect behavior for every request to this bucket's website endpoint.
+ //
+ // If you specify this property, you can't specify any other property.
RedirectAllRequestsTo *RedirectAllRequestsTo
// Rules that define when a redirect is applied and the redirect behavior.
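
Tying the website-related types above together (IndexDocument, ErrorDocument, Condition, Redirect, RoutingRule), here is a minimal PutBucketWebsite sketch that implements the docs/ to /documents redirect described in the Redirect documentation; the bucket and document names are placeholders. RedirectAllRequestsTo is omitted because it cannot be combined with the other properties.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// configureWebsite sets index and error documents plus one conditional redirect.
func configureWebsite(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketWebsite(ctx, &s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-bucket"),
		WebsiteConfiguration: &types.WebsiteConfiguration{
			IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &types.ErrorDocument{Key: aws.String("error.html")},
			RoutingRules: []types.RoutingRule{
				{
					Condition: &types.Condition{KeyPrefixEquals: aws.String("docs/")},
					Redirect:  &types.Redirect{ReplaceKeyPrefixWith: aws.String("/documents")},
				},
			},
		},
	})
	return err
}
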
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/CHANGELOG.md
index e1e7c2f953..78f118602b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/CHANGELOG.md
@@ -1,3 +1,23 @@
+# v1.29.10 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.29.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.7 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.29.6 (2024-04-22)
+
+* **Documentation**: This release adds examples to several Cloud Map actions.
+
# v1.29.5 (2024-04-02)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreateHttpNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreateHttpNamespace.go
index 62276d7b51..59996fb1a1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreateHttpNamespace.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreateHttpNamespace.go
@@ -13,9 +13,12 @@ import (
// Creates an HTTP namespace. Service instances registered using an HTTP namespace
// can be discovered using a DiscoverInstances request but can't be discovered
-// using DNS. For the current quota on the number of namespaces that you can create
-// using the same Amazon Web Services account, see Cloud Map quotas (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html)
-// in the Cloud Map Developer Guide.
+// using DNS.
+//
+// For the current quota on the number of namespaces that you can create using the
+// same Amazon Web Services account, see [Cloud Map quotas] in the Cloud Map Developer Guide.
+//
+// [Cloud Map quotas]: https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html
func (c *Client) CreateHttpNamespace(ctx context.Context, params *CreateHttpNamespaceInput, optFns ...func(*Options)) (*CreateHttpNamespaceOutput, error) {
if params == nil {
params = &CreateHttpNamespaceInput{}
@@ -58,8 +61,9 @@ type CreateHttpNamespaceInput struct {
type CreateHttpNamespaceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
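
Because CreateHttpNamespace (like the other namespace operations) is asynchronous and only returns an operation ID, the usual pattern is to follow up with GetOperation. A sketch with a placeholder namespace name; in practice the GetOperation call would be polled until the status is SUCCESS or FAIL.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
)

// createNamespaceAndCheck starts namespace creation and checks the operation once.
func createNamespaceAndCheck(ctx context.Context, client *servicediscovery.Client) error {
	created, err := client.CreateHttpNamespace(ctx, &servicediscovery.CreateHttpNamespaceInput{
		Name:        aws.String("example.local"),
		Description: aws.String("hypothetical namespace for DiscoverInstances-only services"),
	})
	if err != nil {
		return err
	}

	op, err := client.GetOperation(ctx, &servicediscovery.GetOperationInput{
		OperationId: created.OperationId,
	})
	if err != nil {
		return err
	}
	fmt.Println("operation status:", op.Operation.Status)
	return nil
}
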
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreatePrivateDnsNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreatePrivateDnsNamespace.go
index 2a858752f3..ca39b1d8db 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreatePrivateDnsNamespace.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreatePrivateDnsNamespace.go
@@ -18,8 +18,9 @@ import (
// instances that are registered using a private DNS namespace can be discovered
// using either a DiscoverInstances request or using DNS. For the current quota on
// the number of namespaces that you can create using the same Amazon Web Services
-// account, see Cloud Map quotas (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html)
-// in the Cloud Map Developer Guide.
+// account, see [Cloud Map quotas] in the Cloud Map Developer Guide.
+//
+// [Cloud Map quotas]: https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html
func (c *Client) CreatePrivateDnsNamespace(ctx context.Context, params *CreatePrivateDnsNamespaceInput, optFns ...func(*Options)) (*CreatePrivateDnsNamespaceOutput, error) {
if params == nil {
params = &CreatePrivateDnsNamespaceInput{}
@@ -72,8 +73,9 @@ type CreatePrivateDnsNamespaceInput struct {
type CreatePrivateDnsNamespaceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreatePublicDnsNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreatePublicDnsNamespace.go
index 1b51cc4bc2..514cf52260 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreatePublicDnsNamespace.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreatePublicDnsNamespace.go
@@ -17,9 +17,13 @@ import (
// the service is backend.example.com . You can discover instances that were
// registered with a public DNS namespace by using either a DiscoverInstances
// request or using DNS. For the current quota on the number of namespaces that you
-// can create using the same Amazon Web Services account, see Cloud Map quotas (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html)
-// in the Cloud Map Developer Guide. The CreatePublicDnsNamespace API operation is
-// not supported in the Amazon Web Services GovCloud (US) Regions.
+// can create using the same Amazon Web Services account, see [Cloud Map quotas] in the Cloud Map
+// Developer Guide.
+//
+// The CreatePublicDnsNamespace API operation is not supported in the Amazon Web
+// Services GovCloud (US) Regions.
+//
+// [Cloud Map quotas]: https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html
func (c *Client) CreatePublicDnsNamespace(ctx context.Context, params *CreatePublicDnsNamespaceInput, optFns ...func(*Options)) (*CreatePublicDnsNamespaceOutput, error) {
if params == nil {
params = &CreatePublicDnsNamespaceInput{}
@@ -37,8 +41,10 @@ func (c *Client) CreatePublicDnsNamespace(ctx context.Context, params *CreatePub
type CreatePublicDnsNamespaceInput struct {
- // The name that you want to assign to this namespace. Do not include sensitive
- // information in the name. The name is publicly available using DNS queries.
+ // The name that you want to assign to this namespace.
+ //
+ // Do not include sensitive information in the name. The name is publicly
+ // available using DNS queries.
//
// This member is required.
Name *string
@@ -66,8 +72,9 @@ type CreatePublicDnsNamespaceInput struct {
type CreatePublicDnsNamespaceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreateService.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreateService.go
index eb69502e50..64dbe623e6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreateService.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_CreateService.go
@@ -13,21 +13,31 @@ import (
// Creates a service. This action defines the configuration for the following
// entities:
+//
// - For public and private DNS namespaces, one of the following combinations of
// DNS records in Amazon Route 53:
+//
// - A
+//
// - AAAA
+//
// - A and AAAA
+//
// - SRV
+//
// - CNAME
+//
// - Optionally, a health check
//
-// After you create the service, you can submit a RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html)
-// request, and Cloud Map uses the values in the configuration to create the
-// specified entities. For the current quota on the number of instances that you
-// can register using the same namespace and using the same service, see Cloud Map
-// quotas (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html)
-// in the Cloud Map Developer Guide.
+// After you create the service, you can submit a [RegisterInstance] request, and Cloud Map uses the
+// values in the configuration to create the specified entities.
+//
+// For the current quota on the number of instances that you can register using
+// the same namespace and using the same service, see [Cloud Map quotas] in the Cloud Map Developer
+// Guide.
+//
+// [Cloud Map quotas]: https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html
+// [RegisterInstance]: https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html
func (c *Client) CreateService(ctx context.Context, params *CreateServiceInput, optFns ...func(*Options)) (*CreateServiceOutput, error) {
if params == nil {
params = &CreateServiceInput{}
@@ -45,21 +55,32 @@ func (c *Client) CreateService(ctx context.Context, params *CreateServiceInput,
type CreateServiceInput struct {
- // The name that you want to assign to the service. Do not include sensitive
- // information in the name if the namespace is discoverable by public DNS queries.
+ // The name that you want to assign to the service.
+ //
+ // Do not include sensitive information in the name if the namespace is
+ // discoverable by public DNS queries.
+ //
// If you want Cloud Map to create an SRV record when you register an instance and
- // you're using a system that requires a specific SRV format, such as HAProxy (http://www.haproxy.org/)
- // , specify the following for Name :
+ // you're using a system that requires a specific SRV format, such as [HAProxy], specify
+ // the following for Name :
+ //
// - Start the name with an underscore (_), such as _exampleservice .
+ //
// - End the name with ._protocol, such as ._tcp .
+ //
// When you register an instance, Cloud Map creates an SRV record and assigns a
// name to the record by concatenating the service name and the namespace name (for
- // example, _exampleservice._tcp.example.com ). For services that are accessible by
- // DNS queries, you can't create multiple services with names that differ only by
- // case (such as EXAMPLE and example). Otherwise, these services have the same DNS
- // name and can't be distinguished. However, if you use a namespace that's only
- // accessible by API calls, then you can create services that with names that
- // differ only by case.
+ // example,
+ //
+ // _exampleservice._tcp.example.com ).
+ //
+ // For services that are accessible by DNS queries, you can't create multiple
+ // services with names that differ only by case (such as EXAMPLE and example).
+ // Otherwise, these services have the same DNS name and can't be distinguished.
+ // However, if you use a namespace that's only accessible by API calls, then you
+ // can create services with names that differ only by case.
+ //
+ // [HAProxy]: http://www.haproxy.org/
//
// This member is required.
Name *string
@@ -76,20 +97,26 @@ type CreateServiceInput struct {
// you want Cloud Map to create when you register an instance.
DnsConfig *types.DnsConfig
- // Public DNS and HTTP namespaces only. A complex type that contains settings for
+ // Public DNS and HTTP namespaces only. A complex type that contains settings for
// an optional Route 53 health check. If you specify settings for a health check,
// Cloud Map associates the health check with all the Route 53 DNS records that you
- // specify in DnsConfig . If you specify a health check configuration, you can
- // specify either HealthCheckCustomConfig or HealthCheckConfig but not both. For
- // information about the charges for health checks, see Cloud Map Pricing (http://aws.amazon.com/cloud-map/pricing/)
- // .
+ // specify in DnsConfig .
+ //
+ // If you specify a health check configuration, you can specify either
+ // HealthCheckCustomConfig or HealthCheckConfig but not both.
+ //
+ // For information about the charges for health checks, see [Cloud Map Pricing].
+ //
+ // [Cloud Map Pricing]: http://aws.amazon.com/cloud-map/pricing/
HealthCheckConfig *types.HealthCheckConfig
// A complex type that contains information about an optional custom health check.
+ //
// If you specify a health check configuration, you can specify either
- // HealthCheckCustomConfig or HealthCheckConfig but not both. You can't add,
- // update, or delete a HealthCheckCustomConfig configuration from an existing
- // service.
+ // HealthCheckCustomConfig or HealthCheckConfig but not both.
+ //
+ // You can't add, update, or delete a HealthCheckCustomConfig configuration from
+ // an existing service.
HealthCheckCustomConfig *types.HealthCheckCustomConfig
// The ID of the namespace that you want to use to create the service. The
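
A hedged sketch of the CreateService flow described above, registering a service that creates an A record per instance; the service name is a placeholder and the namespace ID is passed in by the caller. HealthCheckCustomConfig and HealthCheckConfig are mutually exclusive, so only the custom variant is set here.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// createDNSService defines a multivalue-routed service backed by A records.
func createDNSService(ctx context.Context, client *servicediscovery.Client, namespaceID string) error {
	_, err := client.CreateService(ctx, &servicediscovery.CreateServiceInput{
		Name:        aws.String("backend"),
		NamespaceId: aws.String(namespaceID),
		DnsConfig: &types.DnsConfig{
			RoutingPolicy: types.RoutingPolicyMultivalue,
			DnsRecords: []types.DnsRecord{
				{Type: types.RecordTypeA, TTL: aws.Int64(60)},
			},
		},
		// Instance health is reported by the application via UpdateInstanceCustomHealthStatus.
		HealthCheckCustomConfig: &types.HealthCheckCustomConfig{},
	})
	return err
}
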
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DeleteNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DeleteNamespace.go
index 4b4b04cef0..d2a3116ed2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DeleteNamespace.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DeleteNamespace.go
@@ -40,8 +40,9 @@ type DeleteNamespaceInput struct {
type DeleteNamespaceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DeregisterInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DeregisterInstance.go
index 9eb27dce56..9154926bba 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DeregisterInstance.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DeregisterInstance.go
@@ -29,8 +29,9 @@ func (c *Client) DeregisterInstance(ctx context.Context, params *DeregisterInsta
type DeregisterInstanceInput struct {
- // The value that you specified for Id in the RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html)
- // request.
+ // The value that you specified for Id in the [RegisterInstance] request.
+ //
+ // [RegisterInstance]: https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html
//
// This member is required.
InstanceId *string
@@ -46,8 +47,9 @@ type DeregisterInstanceInput struct {
type DeregisterInstanceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DiscoverInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DiscoverInstances.go
index 466fc01cef..d0e3d91ef0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DiscoverInstances.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_DiscoverInstances.go
@@ -34,7 +34,9 @@ func (c *Client) DiscoverInstances(ctx context.Context, params *DiscoverInstance
type DiscoverInstancesInput struct {
// The HttpName name of the namespace. It's found in the HttpProperties member of
- // the Properties member of the namespace.
+ // the Properties member of the namespace. In most cases, Name and HttpName match.
+ // However, if you reuse Name for namespace creation, a generated hash is added to
+ // HttpName to distinguish the two.
//
// This member is required.
NamespaceName *string
@@ -46,10 +48,17 @@ type DiscoverInstancesInput struct {
// The health status of the instances that you want to discover. This parameter is
// ignored for services that don't have a health check configured, and all
- // instances are returned. HEALTHY Returns healthy instances. UNHEALTHY Returns
- // unhealthy instances. ALL Returns all instances. HEALTHY_OR_ELSE_ALL Returns
- // healthy instances, unless none are reporting a healthy state. In that case,
- // return all instances. This is also called failing open.
+ // instances are returned.
+ //
+ // HEALTHY Returns healthy instances.
+ //
+ // UNHEALTHY Returns unhealthy instances.
+ //
+ // ALL Returns all instances.
+ //
+ // HEALTHY_OR_ELSE_ALL Returns healthy instances, unless none are reporting a
+ // healthy state. In that case, return all instances. This is also called failing
+ // open.
HealthStatus types.HealthStatusFilter
// The maximum number of instances that you want Cloud Map to return in the
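
The HealthStatus filter values documented above are exposed as HealthStatusFilter constants. A sketch of DiscoverInstances using HEALTHY_OR_ELSE_ALL ("failing open"), with placeholder namespace and service names and the conventional AWS_INSTANCE_IPV4 attribute set at registration time.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// discoverHealthyInstances prefers healthy instances but falls back to all of
// them when none report healthy.
func discoverHealthyInstances(ctx context.Context, client *servicediscovery.Client) error {
	out, err := client.DiscoverInstances(ctx, &servicediscovery.DiscoverInstancesInput{
		NamespaceName: aws.String("example.local"),
		ServiceName:   aws.String("backend"),
		HealthStatus:  types.HealthStatusFilterHealthyOrElseAll,
		MaxResults:    aws.Int32(10),
	})
	if err != nil {
		return err
	}
	for _, inst := range out.Instances {
		fmt.Println(aws.ToString(inst.InstanceId), inst.Attributes["AWS_INSTANCE_IPV4"])
	}
	return nil
}
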
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_GetInstancesHealthStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_GetInstancesHealthStatus.go
index 4a81953f8a..d6f116cd56 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_GetInstancesHealthStatus.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_GetInstancesHealthStatus.go
@@ -12,9 +12,10 @@ import (
)
// Gets the current health status ( Healthy , Unhealthy , or Unknown ) of one or
-// more instances that are associated with a specified service. There's a brief
-// delay between when you register an instance and when the health status for the
-// instance is available.
+// more instances that are associated with a specified service.
+//
+// There's a brief delay between when you register an instance and when the health
+// status for the instance is available.
func (c *Client) GetInstancesHealthStatus(ctx context.Context, params *GetInstancesHealthStatusInput, optFns ...func(*Options)) (*GetInstancesHealthStatusOutput, error) {
if params == nil {
params = &GetInstancesHealthStatusInput{}
@@ -38,11 +39,15 @@ type GetInstancesHealthStatusInput struct {
ServiceId *string
// An array that contains the IDs of all the instances that you want to get the
- // health status for. If you omit Instances , Cloud Map returns the health status
- // for all the instances that are associated with the specified service. To get the
- // IDs for the instances that you've registered by using a specified service,
- // submit a ListInstances (https://docs.aws.amazon.com/cloud-map/latest/api/API_ListInstances.html)
- // request.
+ // health status for.
+ //
+ // If you omit Instances , Cloud Map returns the health status for all the
+ // instances that are associated with the specified service.
+ //
+ // To get the IDs for the instances that you've registered by using a specified
+ // service, submit a [ListInstances] request.
+ //
+ // [ListInstances]: https://docs.aws.amazon.com/cloud-map/latest/api/API_ListInstances.html
Instances []string
// The maximum number of instances that you want Cloud Map to return in the
@@ -50,10 +55,11 @@ type GetInstancesHealthStatusInput struct {
// for MaxResults , Cloud Map returns up to 100 instances.
MaxResults *int32
- // For the first GetInstancesHealthStatus request, omit this value. If more than
- // MaxResults instances match the specified criteria, you can submit another
- // GetInstancesHealthStatus request to get the next group of results. Specify the
- // value of NextToken from the previous response in the next request.
+ // For the first GetInstancesHealthStatus request, omit this value.
+ //
+ // If more than MaxResults instances match the specified criteria, you can submit
+ // another GetInstancesHealthStatus request to get the next group of results.
+ // Specify the value of NextToken from the previous response in the next request.
NextToken *string
noSmithyDocumentSerde
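
GetInstancesHealthStatus pairs naturally with ListInstances, as the documentation above suggests: list the registered instance IDs, then ask for their health. Omitting Instances would return the status of every instance in the service. A sketch with the service ID supplied by the caller:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
)

// checkServiceHealth reports the current health status of each registered instance.
func checkServiceHealth(ctx context.Context, client *servicediscovery.Client, serviceID string) error {
	listed, err := client.ListInstances(ctx, &servicediscovery.ListInstancesInput{
		ServiceId: aws.String(serviceID),
	})
	if err != nil {
		return err
	}
	ids := make([]string, 0, len(listed.Instances))
	for _, inst := range listed.Instances {
		ids = append(ids, aws.ToString(inst.Id))
	}

	health, err := client.GetInstancesHealthStatus(ctx, &servicediscovery.GetInstancesHealthStatusInput{
		ServiceId: aws.String(serviceID),
		Instances: ids,
	})
	if err != nil {
		return err
	}
	for id, status := range health.Status {
		fmt.Println(id, status)
	}
	return nil
}
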
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_GetOperation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_GetOperation.go
index 1ad47e19f9..f72ef2a814 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_GetOperation.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_GetOperation.go
@@ -12,9 +12,11 @@ import (
)
// Gets information about any operation that returns an operation ID in the
-// response, such as a CreateHttpNamespace request. To get a list of operations
-// that match specified criteria, see ListOperations (https://docs.aws.amazon.com/cloud-map/latest/api/API_ListOperations.html)
-// .
+// response, such as a CreateHttpNamespace request.
+//
+// To get a list of operations that match specified criteria, see [ListOperations].
+//
+// [ListOperations]: https://docs.aws.amazon.com/cloud-map/latest/api/API_ListOperations.html
func (c *Client) GetOperation(ctx context.Context, params *GetOperationInput, optFns ...func(*Options)) (*GetOperationOutput, error) {
if params == nil {
params = &GetOperationInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListInstances.go
index 9b84c1b45b..70655b88d4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListInstances.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListInstances.go
@@ -40,10 +40,11 @@ type ListInstancesInput struct {
// , Cloud Map returns up to 100 instances.
MaxResults *int32
- // For the first ListInstances request, omit this value. If more than MaxResults
- // instances match the specified criteria, you can submit another ListInstances
- // request to get the next group of results. Specify the value of NextToken from
- // the previous response in the next request.
+ // For the first ListInstances request, omit this value.
+ //
+ // If more than MaxResults instances match the specified criteria, you can submit
+ // another ListInstances request to get the next group of results. Specify the
+ // value of NextToken from the previous response in the next request.
NextToken *string
noSmithyDocumentSerde
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListNamespaces.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListNamespaces.go
index 69b1650bff..e62e03f69b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListNamespaces.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListNamespaces.go
@@ -31,8 +31,10 @@ func (c *Client) ListNamespaces(ctx context.Context, params *ListNamespacesInput
type ListNamespacesInput struct {
// A complex type that contains specifications for the namespaces that you want to
- // list. If you specify more than one filter, a namespace must match all filters to
- // be returned by ListNamespaces .
+ // list.
+ //
+ // If you specify more than one filter, a namespace must match all filters to be
+ // returned by ListNamespaces .
Filters []types.NamespaceFilter
// The maximum number of namespaces that you want Cloud Map to return in the
@@ -40,11 +42,14 @@ type ListNamespacesInput struct {
// MaxResults , Cloud Map returns up to 100 namespaces.
MaxResults *int32
- // For the first ListNamespaces request, omit this value. If the response contains
- // NextToken , submit another ListNamespaces request to get the next group of
- // results. Specify the value of NextToken from the previous response in the next
- // request. Cloud Map gets MaxResults namespaces and then filters them based on
- // the specified criteria. It's possible that no namespaces in the first MaxResults
+ // For the first ListNamespaces request, omit this value.
+ //
+ // If the response contains NextToken , submit another ListNamespaces request to
+ // get the next group of results. Specify the value of NextToken from the previous
+ // response in the next request.
+ //
+ // Cloud Map gets MaxResults namespaces and then filters them based on the
+ // specified criteria. It's possible that no namespaces in the first MaxResults
// namespaces matched the specified criteria but that subsequent groups of
// MaxResults namespaces do contain namespaces that match the criteria.
NextToken *string
@@ -60,11 +65,12 @@ type ListNamespacesOutput struct {
// If the response contains NextToken , submit another ListNamespaces request to
// get the next group of results. Specify the value of NextToken from the previous
- // response in the next request. Cloud Map gets MaxResults namespaces and then
- // filters them based on the specified criteria. It's possible that no namespaces
- // in the first MaxResults namespaces matched the specified criteria but that
- // subsequent groups of MaxResults namespaces do contain namespaces that match the
- // criteria.
+ // response in the next request.
+ //
+ // Cloud Map gets MaxResults namespaces and then filters them based on the
+ // specified criteria. It's possible that no namespaces in the first MaxResults
+ // namespaces matched the specified criteria but that subsequent groups of
+ // MaxResults namespaces do contain namespaces that match the criteria.
NextToken *string
// Metadata pertaining to the operation's result.
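
The NextToken contract described above (Cloud Map filters after fetching MaxResults items, so a page can be empty while later pages still match) means the loop must continue until NextToken is no longer returned. A manual-pagination sketch with an assumed TYPE filter for private DNS namespaces:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// listPrivateDNSNamespaces pages through ListNamespaces until NextToken is absent.
func listPrivateDNSNamespaces(ctx context.Context, client *servicediscovery.Client) error {
	var token *string
	for {
		out, err := client.ListNamespaces(ctx, &servicediscovery.ListNamespacesInput{
			MaxResults: aws.Int32(100),
			NextToken:  token,
			Filters: []types.NamespaceFilter{
				{
					Name:      types.NamespaceFilterNameType,
					Values:    []string{string(types.NamespaceTypeDnsPrivate)},
					Condition: types.FilterConditionEq,
				},
			},
		})
		if err != nil {
			return err
		}
		for _, ns := range out.Namespaces {
			fmt.Println(aws.ToString(ns.Id), aws.ToString(ns.Name))
		}
		if out.NextToken == nil {
			break
		}
		token = out.NextToken
	}
	return nil
}
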
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListOperations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListOperations.go
index d915bacff8..6a153bb61e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListOperations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListOperations.go
@@ -31,8 +31,10 @@ type ListOperationsInput struct {
// A complex type that contains specifications for the operations that you want to
// list, for example, operations that you started between a specified start date
- // and end date. If you specify more than one filter, an operation must match all
- // filters to be returned by ListOperations .
+ // and end date.
+ //
+ // If you specify more than one filter, an operation must match all filters to be
+ // returned by ListOperations .
Filters []types.OperationFilter
// The maximum number of items that you want Cloud Map to return in the response
@@ -40,11 +42,14 @@ type ListOperationsInput struct {
// Map returns up to 100 operations.
MaxResults *int32
- // For the first ListOperations request, omit this value. If the response contains
- // NextToken , submit another ListOperations request to get the next group of
- // results. Specify the value of NextToken from the previous response in the next
- // request. Cloud Map gets MaxResults operations and then filters them based on
- // the specified criteria. It's possible that no operations in the first MaxResults
+ // For the first ListOperations request, omit this value.
+ //
+ // If the response contains NextToken , submit another ListOperations request to
+ // get the next group of results. Specify the value of NextToken from the previous
+ // response in the next request.
+ //
+ // Cloud Map gets MaxResults operations and then filters them based on the
+ // specified criteria. It's possible that no operations in the first MaxResults
// operations matched the specified criteria but that subsequent groups of
// MaxResults operations do contain operations that match the criteria.
NextToken *string
@@ -56,11 +61,12 @@ type ListOperationsOutput struct {
// If the response contains NextToken , submit another ListOperations request to
// get the next group of results. Specify the value of NextToken from the previous
- // response in the next request. Cloud Map gets MaxResults operations and then
- // filters them based on the specified criteria. It's possible that no operations
- // in the first MaxResults operations matched the specified criteria but that
- // subsequent groups of MaxResults operations do contain operations that match the
- // criteria.
+ // response in the next request.
+ //
+ // Cloud Map gets MaxResults operations and then filters them based on the
+ // specified criteria. It's possible that no operations in the first MaxResults
+ // operations matched the specified criteria but that subsequent groups of
+ // MaxResults operations do contain operations that match the criteria.
NextToken *string
// Summary information about the operations that match the specified criteria.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListServices.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListServices.go
index 904c5f6bc0..b84322ec90 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListServices.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_ListServices.go
@@ -12,7 +12,7 @@ import (
)
// Lists summary information for all the services that are associated with one or
-// more specified namespaces.
+// more namespaces.
func (c *Client) ListServices(ctx context.Context, params *ListServicesInput, optFns ...func(*Options)) (*ListServicesOutput, error) {
if params == nil {
params = &ListServicesInput{}
@@ -31,8 +31,10 @@ func (c *Client) ListServices(ctx context.Context, params *ListServicesInput, op
type ListServicesInput struct {
// A complex type that contains specifications for the namespaces that you want to
- // list services for. If you specify more than one filter, an operation must match
- // all filters to be returned by ListServices .
+ // list services for.
+ //
+ // If you specify more than one filter, an operation must match all filters to be
+ // returned by ListServices .
Filters []types.ServiceFilter
// The maximum number of services that you want Cloud Map to return in the
@@ -40,12 +42,15 @@ type ListServicesInput struct {
// , Cloud Map returns up to 100 services.
MaxResults *int32
- // For the first ListServices request, omit this value. If the response contains
- // NextToken , submit another ListServices request to get the next group of
- // results. Specify the value of NextToken from the previous response in the next
- // request. Cloud Map gets MaxResults services and then filters them based on the
- // specified criteria. It's possible that no services in the first MaxResults
- // services matched the specified criteria but that subsequent groups of MaxResults
+ // For the first ListServices request, omit this value.
+ //
+ // If the response contains NextToken , submit another ListServices request to get
+ // the next group of results. Specify the value of NextToken from the previous
+ // response in the next request.
+ //
+ // Cloud Map gets MaxResults services and then filters them based on the specified
+ // criteria. It's possible that no services in the first MaxResults services
+ // matched the specified criteria but that subsequent groups of MaxResults
// services do contain services that match the criteria.
NextToken *string
@@ -56,11 +61,12 @@ type ListServicesOutput struct {
// If the response contains NextToken , submit another ListServices request to get
// the next group of results. Specify the value of NextToken from the previous
- // response in the next request. Cloud Map gets MaxResults services and then
- // filters them based on the specified criteria. It's possible that no services in
- // the first MaxResults services matched the specified criteria but that
- // subsequent groups of MaxResults services do contain services that match the
- // criteria.
+ // response in the next request.
+ //
+ // Cloud Map gets MaxResults services and then filters them based on the specified
+ // criteria. It's possible that no services in the first MaxResults services
+ // matched the specified criteria but that subsequent groups of MaxResults
+ // services do contain services that match the criteria.
NextToken *string
// An array that contains one ServiceSummary object for each service that matches
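Editor's note: the NextToken contract spelled out above is the same for ListNamespaces, ListOperations, and ListServices — a page can be empty even though later pages match the filters, so callers should keep requesting pages until NextToken is absent. A minimal sketch of driving that loop by hand follows (the generated paginator helpers in this package, e.g. NewListServicesPaginator, wrap the same loop); it assumes default credentials and Region are available to config.LoadDefaultConfig.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/servicediscovery"
    )

    // listAllServices pages through ListServices until Cloud Map stops returning
    // a NextToken. A page can legitimately be empty when filters are in use, so
    // the loop is driven by NextToken rather than by len(out.Services).
    func listAllServices(ctx context.Context, client *servicediscovery.Client) error {
        input := &servicediscovery.ListServicesInput{MaxResults: aws.Int32(100)}
        for {
            out, err := client.ListServices(ctx, input)
            if err != nil {
                return err
            }
            for _, svc := range out.Services {
                fmt.Println(aws.ToString(svc.Id), aws.ToString(svc.Name))
            }
            if out.NextToken == nil {
                return nil
            }
            input.NextToken = out.NextToken
        }
    }

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        if err := listAllServices(ctx, servicediscovery.NewFromConfig(cfg)); err != nil {
            log.Fatal(err)
        }
    }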
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_RegisterInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_RegisterInstance.go
index cf487975c2..4b049b7a16 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_RegisterInstance.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_RegisterInstance.go
@@ -13,27 +13,38 @@ import (
// Creates or updates one or more records and, optionally, creates a health check
// based on the settings in a specified service. When you submit a RegisterInstance
// request, the following occurs:
+//
// - For each DNS record that you define in the service that's specified by
// ServiceId , a record is created or updated in the hosted zone that's
// associated with the corresponding namespace.
+//
// - If the service includes HealthCheckConfig , a health check is created based
// on the settings in the health check configuration.
+//
// - The health check, if any, is associated with each of the new or updated
// records.
//
// One RegisterInstance request must complete before you can submit another
-// request and specify the same service ID and instance ID. For more information,
-// see CreateService (https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html)
-// . When Cloud Map receives a DNS query for the specified DNS name, it returns the
+// request and specify the same service ID and instance ID.
+//
+// For more information, see [CreateService].
+//
+// When Cloud Map receives a DNS query for the specified DNS name, it returns the
// applicable value:
+//
// - If the health check is healthy: returns all the records
+//
// - If the health check is unhealthy: returns the applicable value for the last
// healthy instance
+//
// - If you didn't specify a health check configuration: returns all the records
//
// For the current quota on the number of instances that you can register using
-// the same namespace and using the same service, see Cloud Map quotas (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html)
-// in the Cloud Map Developer Guide.
+// the same namespace and using the same service, see [Cloud Map quotas]in the Cloud Map Developer
+// Guide.
+//
+// [CreateService]: https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html
+// [Cloud Map quotas]: https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html
func (c *Client) RegisterInstance(ctx context.Context, params *RegisterInstanceInput, optFns ...func(*Options)) (*RegisterInstanceOutput, error) {
if params == nil {
params = &RegisterInstanceInput{}
@@ -53,82 +64,122 @@ type RegisterInstanceInput struct {
// A string map that contains the following information for the service that you
// specify in ServiceId :
+ //
// - The attributes that apply to the records that are defined in the service.
+ //
// - For each attribute, the applicable value.
+ //
// Do not include sensitive information in the attributes if the namespace is
- // discoverable by public DNS queries. Supported attribute keys include the
- // following: AWS_ALIAS_DNS_NAME If you want Cloud Map to create an Amazon Route 53
- // alias record that routes traffic to an Elastic Load Balancing load balancer,
- // specify the DNS name that's associated with the load balancer. For information
- // about how to get the DNS name, see "DNSName" in the topic AliasTarget (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html)
- // in the Route 53 API Reference. Note the following:
+ // discoverable by public DNS queries.
+ //
+ // The following are the supported attribute keys.
+ //
+ // AWS_ALIAS_DNS_NAME If you want Cloud Map to create an Amazon Route 53 alias
+ // record that routes traffic to an Elastic Load Balancing load balancer, specify
+ // the DNS name that's associated with the load balancer. For information about how
+ // to get the DNS name, see "DNSName" in the topic [AliasTarget]in the Route 53 API Reference.
+ //
+ // Note the following:
+ //
// - The configuration for the service that's specified by ServiceId must include
// settings for an A record, an AAAA record, or both.
+ //
// - In the service that's specified by ServiceId , the value of RoutingPolicy
// must be WEIGHTED .
+ //
// - If the service that's specified by ServiceId includes HealthCheckConfig
// settings, Cloud Map will create the Route 53 health check, but it doesn't
// associate the health check with the alias record.
+ //
// - Cloud Map currently doesn't support creating alias records that route
// traffic to Amazon Web Services resources other than Elastic Load Balancing load
// balancers.
+ //
// - If you specify a value for AWS_ALIAS_DNS_NAME , don't specify values for any
// of the AWS_INSTANCE attributes.
- // AWS_EC2_INSTANCE_ID HTTP namespaces only. The Amazon EC2 instance ID for the
+ //
+ // - The AWS_ALIAS_DNS_NAME is not supported in the GovCloud (US) Regions.
+ //
+ // AWS_EC2_INSTANCE_ID HTTP namespaces only. The Amazon EC2 instance ID for the
// instance. If the AWS_EC2_INSTANCE_ID attribute is specified, then the only
// other attribute that can be specified is AWS_INIT_HEALTH_STATUS . When the
// AWS_EC2_INSTANCE_ID attribute is specified, then the AWS_INSTANCE_IPV4
// attribute will be filled out with the primary private IPv4 address.
+ //
// AWS_INIT_HEALTH_STATUS If the service configuration includes
// HealthCheckCustomConfig , you can optionally use AWS_INIT_HEALTH_STATUS to
// specify the initial status of the custom health check, HEALTHY or UNHEALTHY . If
// you don't specify a value for AWS_INIT_HEALTH_STATUS , the initial status is
- // HEALTHY . AWS_INSTANCE_CNAME If the service configuration includes a CNAME
- // record, the domain name that you want Route 53 to return in response to DNS
- // queries (for example, example.com ). This value is required if the service
- // specified by ServiceId includes settings for an CNAME record. AWS_INSTANCE_IPV4
- // If the service configuration includes an A record, the IPv4 address that you
- // want Route 53 to return in response to DNS queries (for example, 192.0.2.44 ).
+ // HEALTHY .
+ //
+ // AWS_INSTANCE_CNAME If the service configuration includes a CNAME record, the
+ // domain name that you want Route 53 to return in response to DNS queries (for
+ // example, example.com ).
+ //
+ // This value is required if the service specified by ServiceId includes settings
+ // for a CNAME record.
+ //
+ // AWS_INSTANCE_IPV4 If the service configuration includes an A record, the IPv4
+ // address that you want Route 53 to return in response to DNS queries (for
+ // example, 192.0.2.44 ).
+ //
// This value is required if the service specified by ServiceId includes settings
// for an A record. If the service includes settings for an SRV record, you must
// specify a value for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both.
+ //
// AWS_INSTANCE_IPV6 If the service configuration includes an AAAA record, the
// IPv6 address that you want Route 53 to return in response to DNS queries (for
- // example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 ). This value is required if
- // the service specified by ServiceId includes settings for an AAAA record. If the
- // service includes settings for an SRV record, you must specify a value for
- // AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both. AWS_INSTANCE_PORT If the
- // service includes an SRV record, the value that you want Route 53 to return for
- // the port. If the service includes HealthCheckConfig , the port on the endpoint
- // that you want Route 53 to send requests to. This value is required if you
- // specified settings for an SRV record or a Route 53 health check when you
- // created the service. Custom attributes You can add up to 30 custom attributes.
- // For each key-value pair, the maximum length of the attribute name is 255
- // characters, and the maximum length of the attribute value is 1,024 characters.
- // The total size of all provided attributes (sum of all keys and values) must not
- // exceed 5,000 characters.
+ // example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 ).
+ //
+ // This value is required if the service specified by ServiceId includes settings
+ // for an AAAA record. If the service includes settings for an SRV record, you
+ // must specify a value for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both.
+ //
+ // AWS_INSTANCE_PORT If the service includes an SRV record, the value that you
+ // want Route 53 to return for the port.
+ //
+ // If the service includes HealthCheckConfig , the port on the endpoint that you
+ // want Route 53 to send requests to.
+ //
+ // This value is required if you specified settings for an SRV record or a Route
+ // 53 health check when you created the service.
+ //
+ // Custom attributes You can add up to 30 custom attributes. For each key-value
+ // pair, the maximum length of the attribute name is 255 characters, and the
+ // maximum length of the attribute value is 1,024 characters. The total size of all
+ // provided attributes (sum of all keys and values) must not exceed 5,000
+ // characters.
+ //
+ // [AliasTarget]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html
//
// This member is required.
Attributes map[string]string
// An identifier that you want to associate with the instance. Note the following:
+ //
// - If the service that's specified by ServiceId includes settings for an SRV
// record, the value of InstanceId is automatically included as part of the value
- // for the SRV record. For more information, see DnsRecord > Type (https://docs.aws.amazon.com/cloud-map/latest/api/API_DnsRecord.html#cloudmap-Type-DnsRecord-Type)
- // .
+ // for the SRV record. For more information, see [DnsRecord > Type].
+ //
// - You can use this value to update an existing instance.
+ //
// - To register a new instance, you must specify a value that's unique among
// instances that you register by using the same service.
+ //
// - If you specify an existing InstanceId and ServiceId , Cloud Map updates the
// existing DNS records, if any. If there's also an existing health check, Cloud
- // Map deletes the old health check and creates a new one. The health check isn't
- // deleted immediately, so it will still appear for a while if you submit a
- // ListHealthChecks request, for example.
+ // Map deletes the old health check and creates a new one.
+ //
+ // The health check isn't deleted immediately, so it will still appear for a while
+ // if you submit a ListHealthChecks request, for example.
+ //
// Do not include sensitive information in InstanceId if the namespace is
// discoverable by public DNS queries and any Type member of DnsRecord for the
// service contains SRV because the InstanceId is discoverable by public DNS
// queries.
//
+ // [DnsRecord > Type]: https://docs.aws.amazon.com/cloud-map/latest/api/API_DnsRecord.html#cloudmap-Type-DnsRecord-Type
+ //
// This member is required.
InstanceId *string
@@ -151,8 +202,9 @@ type RegisterInstanceInput struct {
type RegisterInstanceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
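Editor's note: a hedged sketch of a RegisterInstance call that uses only attribute keys documented above (AWS_INSTANCE_IPV4 and AWS_INSTANCE_PORT). The service and instance IDs are hypothetical placeholders, and the client and context are assumed to come from the ListServices sketch earlier.

    // registerWebInstance creates or updates the records for one instance. The
    // call is asynchronous: Cloud Map returns an OperationId that can be polled
    // with GetOperation (see the polling sketch further below).
    func registerWebInstance(ctx context.Context, client *servicediscovery.Client) (*string, error) {
        out, err := client.RegisterInstance(ctx, &servicediscovery.RegisterInstanceInput{
            ServiceId:  aws.String("srv-example"), // hypothetical service ID
            InstanceId: aws.String("web-1"),       // must be unique within the service
            Attributes: map[string]string{
                "AWS_INSTANCE_IPV4": "192.0.2.44", // needed because the service defines an A record
                "AWS_INSTANCE_PORT": "8080",       // needed when the service defines an SRV record or health check
            },
        })
        if err != nil {
            return nil, err
        }
        return out.OperationId, nil
    }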
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateHttpNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateHttpNamespace.go
index 47979754e0..2e6d7cb8f4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateHttpNamespace.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateHttpNamespace.go
@@ -51,8 +51,9 @@ type UpdateHttpNamespaceInput struct {
type UpdateHttpNamespaceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
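Editor's note: several outputs in this package (RegisterInstance, UpdateService, and the Update*Namespace operations) return only an OperationId and point at GetOperation for the final status. A small polling sketch, assuming the same client as above plus the standard time and fmt packages and the generated types package ("github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"):

    // waitForOperation polls GetOperation until the operation reaches a terminal
    // status (SUCCESS or FAIL). SUBMITTED and PENDING simply mean "not done yet".
    func waitForOperation(ctx context.Context, client *servicediscovery.Client, operationID *string) error {
        for {
            out, err := client.GetOperation(ctx, &servicediscovery.GetOperationInput{OperationId: operationID})
            if err != nil {
                return err
            }
            if op := out.Operation; op != nil {
                switch op.Status {
                case types.OperationStatusSuccess:
                    return nil
                case types.OperationStatusFail:
                    return fmt.Errorf("operation %s failed: %s", aws.ToString(op.Id), aws.ToString(op.ErrorMessage))
                }
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(2 * time.Second): // simple fixed interval; add backoff as needed
            }
        }
    }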
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateInstanceCustomHealthStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateInstanceCustomHealthStatus.go
index 95d5391bb1..8ed4b2440c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateInstanceCustomHealthStatus.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateInstanceCustomHealthStatus.go
@@ -12,12 +12,16 @@ import (
)
// Submits a request to change the health status of a custom health check to
-// healthy or unhealthy. You can use UpdateInstanceCustomHealthStatus to change
-// the status only for custom health checks, which you define using
-// HealthCheckCustomConfig when you create a service. You can't use it to change
-// the status for Route 53 health checks, which you define using HealthCheckConfig
-// . For more information, see HealthCheckCustomConfig (https://docs.aws.amazon.com/cloud-map/latest/api/API_HealthCheckCustomConfig.html)
-// .
+// healthy or unhealthy.
+//
+// You can use UpdateInstanceCustomHealthStatus to change the status only for
+// custom health checks, which you define using HealthCheckCustomConfig when you
+// create a service. You can't use it to change the status for Route 53 health
+// checks, which you define using HealthCheckConfig .
+//
+// For more information, see [HealthCheckCustomConfig].
+//
+// [HealthCheckCustomConfig]: https://docs.aws.amazon.com/cloud-map/latest/api/API_HealthCheckCustomConfig.html
func (c *Client) UpdateInstanceCustomHealthStatus(ctx context.Context, params *UpdateInstanceCustomHealthStatusInput, optFns ...func(*Options)) (*UpdateInstanceCustomHealthStatusOutput, error) {
if params == nil {
params = &UpdateInstanceCustomHealthStatusInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdatePrivateDnsNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdatePrivateDnsNamespace.go
index a0da7e9f05..050873a403 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdatePrivateDnsNamespace.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdatePrivateDnsNamespace.go
@@ -51,8 +51,9 @@ type UpdatePrivateDnsNamespaceInput struct {
type UpdatePrivateDnsNamespaceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdatePublicDnsNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdatePublicDnsNamespace.go
index d40dc8099c..ed31c9c961 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdatePublicDnsNamespace.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdatePublicDnsNamespace.go
@@ -51,8 +51,9 @@ type UpdatePublicDnsNamespaceInput struct {
type UpdatePublicDnsNamespaceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateService.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateService.go
index 355c373875..f5dcaf0fb0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateService.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/api_op_UpdateService.go
@@ -12,13 +12,18 @@ import (
)
// Submits a request to perform the following operations:
+//
// - Update the TTL setting for existing DnsRecords configurations
-// - Add, update, or delete HealthCheckConfig for a specified service You can't
-// add, update, or delete a HealthCheckCustomConfig configuration.
+//
+// - Add, update, or delete HealthCheckConfig for a specified service
+//
+// You can't add, update, or delete a HealthCheckCustomConfig configuration.
//
// For public and private DNS namespaces, note the following:
+//
// - If you omit any existing DnsRecords or HealthCheckConfig configurations from
// an UpdateService request, the configurations are deleted from the service.
+//
// - If you omit an existing HealthCheckCustomConfig configuration from an
// UpdateService request, the configuration isn't deleted from the service.
//
@@ -58,8 +63,9 @@ type UpdateServiceInput struct {
type UpdateServiceOutput struct {
// A value that you can use to determine whether the request completed
- // successfully. To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html)
- // .
+ // successfully. To get the status of the operation, see [GetOperation].
+ //
+ // [GetOperation]: https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html
OperationId *string
// Metadata pertaining to the operation's result.
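Editor's note: because omitted DnsRecords and HealthCheckConfig entries are deleted from the service, an UpdateService request should carry the complete desired configuration rather than a delta. A sketch under that assumption, with a hypothetical service ID and the waitForOperation helper from above:

    // updateTTL rewrites the service's DNS configuration with a new TTL. The full
    // record set is resent; anything left out of DnsRecords would be removed.
    func updateTTL(ctx context.Context, client *servicediscovery.Client) error {
        out, err := client.UpdateService(ctx, &servicediscovery.UpdateServiceInput{
            Id: aws.String("srv-example"), // hypothetical service ID
            Service: &types.ServiceChange{
                DnsConfig: &types.DnsConfigChange{
                    DnsRecords: []types.DnsRecord{
                        {Type: types.RecordTypeA, TTL: aws.Int64(30)},
                    },
                },
            },
        })
        if err != nil {
            return err
        }
        return waitForOperation(ctx, client, out.OperationId)
    }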
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/deserializers.go
index 2dbbabd1fc..52a4620c76 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/deserializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/deserializers.go
@@ -18,8 +18,17 @@ import (
"io"
"io/ioutil"
"strings"
+ "time"
)
+func deserializeS3Expires(v string) (*time.Time, error) {
+ t, err := smithytime.ParseHTTPDate(v)
+ if err != nil {
+ return nil, nil
+ }
+ return &t, nil
+}
+
type awsAwsjson11_deserializeOpCreateHttpNamespace struct {
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/doc.go
index c9fede0080..ed0353ce70 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/doc.go
@@ -3,11 +3,13 @@
// Package servicediscovery provides the API client, operations, and parameter
// types for AWS Cloud Map.
//
-// Cloud Map With Cloud Map, you can configure public DNS, private DNS, or HTTP
-// namespaces that your microservice applications run in. When an instance becomes
-// available, you can call the Cloud Map API to register the instance with Cloud
-// Map. For public or private DNS namespaces, Cloud Map automatically creates DNS
-// records and an optional health check. Clients that submit public or private DNS
-// queries, or HTTP requests, for the service receive an answer that contains up to
-// eight healthy records.
+// # Cloud Map
+//
+// With Cloud Map, you can configure public DNS, private DNS, or HTTP namespaces
+// that your microservice applications run in. When an instance becomes available,
+// you can call the Cloud Map API to register the instance with Cloud Map. For
+// public or private DNS namespaces, Cloud Map automatically creates DNS records
+// and an optional health check. Clients that submit public or private DNS queries,
+// or HTTP requests, for the service receive an answer that contains up to eight
+// healthy records.
package servicediscovery
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/endpoints.go
index 0fadd03977..252aacd350 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/endpoints.go
@@ -373,7 +373,7 @@ func (r *resolver) ResolveEndpoint(
}
}
if _UseFIPS == true {
- if true == _PartitionResult.SupportsFIPS {
+ if _PartitionResult.SupportsFIPS == true {
uriString := func() string {
var out strings.Builder
out.WriteString("https://servicediscovery-fips.")
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/go_module_metadata.go
index a85a1d9c1f..ce9370d062 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/go_module_metadata.go
@@ -3,4 +3,4 @@
package servicediscovery
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.29.5"
+const goModuleVersion = "1.29.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/options.go
index 340a4980aa..ec6b1919dc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/options.go
@@ -50,8 +50,10 @@ type Options struct {
// Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
// value for this field will likely prevent you from using any endpoint-related
// service features released after the introduction of EndpointResolverV2 and
- // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom
- // endpoint, set the client option BaseEndpoint instead.
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
EndpointResolver EndpointResolver
// Resolves the endpoint used for a particular service operation. This should be
@@ -74,17 +76,20 @@ type Options struct {
// RetryMaxAttempts specifies the maximum number attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. If specified in an operation call's
- // functional options with a value that is different than the constructed client's
- // Options, the Client's Retryer will be wrapped to use the operation's specific
- // RetryMaxAttempts value.
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
RetryMaxAttempts int
// RetryMode specifies the retry mode the API client will be created with, if
- // Retryer option is not also specified. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. Currently does not support per operation call
- // overrides, may in the future.
+ // Retryer option is not also specified.
+ //
+ // When creating a new API client, this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently does not support per operation call overrides, may in the future.
RetryMode aws.RetryMode
// Retryer guides how HTTP requests should be retried in case of recoverable
@@ -101,8 +106,9 @@ type Options struct {
// The initial DefaultsMode used when the client options were constructed. If the
// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
- // value was at that point in time. Currently does not support per operation call
- // overrides, may in the future.
+ // value was at that point in time.
+ //
+ // Currently does not support per operation call overrides, may in the future.
resolvedDefaultsMode aws.DefaultsMode
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
@@ -147,6 +153,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
// this field will likely prevent you from using any endpoint-related service
// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
// To migrate an EndpointResolver implementation that uses a custom endpoint, set
// the client option BaseEndpoint instead.
func WithEndpointResolver(v EndpointResolver) func(*Options) {
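Editor's note: the retry and endpoint knobs documented above are set per client through functional options; BaseEndpoint is the replacement for the deprecated EndpointResolver. A fragment, assuming a loaded aws.Config named cfg:

    client := servicediscovery.NewFromConfig(cfg, func(o *servicediscovery.Options) {
        o.RetryMaxAttempts = 5              // wraps the retryer with this per-client maximum
        o.RetryMode = aws.RetryModeAdaptive // only consulted when no explicit Retryer is supplied
        // o.BaseEndpoint = aws.String("https://example.invalid") // custom endpoint instead of the deprecated EndpointResolver
    })
    _ = client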
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/enums.go
index b121366530..f59925e2a2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/enums.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/enums.go
@@ -11,8 +11,9 @@ const (
)
// Values returns all known values for CustomHealthStatus. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (CustomHealthStatus) Values() []CustomHealthStatus {
return []CustomHealthStatus{
"HEALTHY",
@@ -31,8 +32,9 @@ const (
)
// Values returns all known values for FilterCondition. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (FilterCondition) Values() []FilterCondition {
return []FilterCondition{
"EQ",
@@ -52,8 +54,9 @@ const (
)
// Values returns all known values for HealthCheckType. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (HealthCheckType) Values() []HealthCheckType {
return []HealthCheckType{
"HTTP",
@@ -72,8 +75,9 @@ const (
)
// Values returns all known values for HealthStatus. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (HealthStatus) Values() []HealthStatus {
return []HealthStatus{
"HEALTHY",
@@ -93,8 +97,9 @@ const (
)
// Values returns all known values for HealthStatusFilter. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (HealthStatusFilter) Values() []HealthStatusFilter {
return []HealthStatusFilter{
"HEALTHY",
@@ -114,8 +119,9 @@ const (
)
// Values returns all known values for NamespaceFilterName. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (NamespaceFilterName) Values() []NamespaceFilterName {
return []NamespaceFilterName{
"TYPE",
@@ -134,8 +140,9 @@ const (
)
// Values returns all known values for NamespaceType. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (NamespaceType) Values() []NamespaceType {
return []NamespaceType{
"DNS_PUBLIC",
@@ -156,8 +163,9 @@ const (
)
// Values returns all known values for OperationFilterName. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (OperationFilterName) Values() []OperationFilterName {
return []OperationFilterName{
"NAMESPACE_ID",
@@ -179,8 +187,9 @@ const (
)
// Values returns all known values for OperationStatus. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (OperationStatus) Values() []OperationStatus {
return []OperationStatus{
"SUBMITTED",
@@ -200,8 +209,9 @@ const (
)
// Values returns all known values for OperationTargetType. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (OperationTargetType) Values() []OperationTargetType {
return []OperationTargetType{
"NAMESPACE",
@@ -223,8 +233,9 @@ const (
)
// Values returns all known values for OperationType. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (OperationType) Values() []OperationType {
return []OperationType{
"CREATE_NAMESPACE",
@@ -247,8 +258,9 @@ const (
)
// Values returns all known values for RecordType. Note that this can be expanded
-// in the future, and so it is only as up to date as the client. The ordering of
-// this slice is not guaranteed to be stable across updates.
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (RecordType) Values() []RecordType {
return []RecordType{
"SRV",
@@ -267,8 +279,9 @@ const (
)
// Values returns all known values for RoutingPolicy. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (RoutingPolicy) Values() []RoutingPolicy {
return []RoutingPolicy{
"MULTIVALUE",
@@ -284,8 +297,9 @@ const (
)
// Values returns all known values for ServiceFilterName. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ServiceFilterName) Values() []ServiceFilterName {
return []ServiceFilterName{
"NAMESPACE_ID",
@@ -302,8 +316,9 @@ const (
)
// Values returns all known values for ServiceType. Note that this can be expanded
-// in the future, and so it is only as up to date as the client. The ordering of
-// this slice is not guaranteed to be stable across updates.
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ServiceType) Values() []ServiceType {
return []ServiceType{
"HTTP",
@@ -320,8 +335,9 @@ const (
)
// Values returns all known values for ServiceTypeOption. Note that this can be
-// expanded in the future, and so it is only as up to date as the client. The
-// ordering of this slice is not guaranteed to be stable across updates.
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
func (ServiceTypeOption) Values() []ServiceTypeOption {
return []ServiceTypeOption{
"HTTP",
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/errors.go
index a7a922266c..faeb64dd98 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/errors.go
@@ -199,9 +199,9 @@ func (e *OperationNotFound) ErrorCode() string {
func (e *OperationNotFound) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The operation can't be completed because you've reached the quota for the
-// number of requests. For more information, see Cloud Map API request throttling
-// quota (https://docs.aws.amazon.com/cloud-map/latest/dg/throttling.html) in the
-// Cloud Map Developer Guide.
+// number of requests. For more information, see [Cloud Map API request throttling quota]in the Cloud Map Developer Guide.
+//
+// [Cloud Map API request throttling quota]: https://docs.aws.amazon.com/cloud-map/latest/dg/throttling.html
type RequestLimitExceeded struct {
Message *string
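Editor's note: the modeled error types in this package unwrap with the standard errors package, so throttling can be detected and handled separately from other failures. A sketch, assuming the client and context from the earlier examples plus an "errors" import:

    // listOnce performs a single ListServices call and flags throttling so the
    // caller can back off before retrying.
    func listOnce(ctx context.Context, client *servicediscovery.Client) ([]types.ServiceSummary, error) {
        out, err := client.ListServices(ctx, &servicediscovery.ListServicesInput{})
        if err != nil {
            var throttled *types.RequestLimitExceeded
            if errors.As(err, &throttled) {
                // Hit the Cloud Map request throttling quota: back off before retrying.
            }
            return nil, err
        }
        return out.Services, nil
    }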
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/types.go
index 018f7fefa7..f0921d6cee 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/types.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/servicediscovery/types/types.go
@@ -8,9 +8,10 @@ import (
)
// A complex type that contains information about the Amazon Route 53 DNS records
-// that you want Cloud Map to create when you register an instance. The record
-// types of a service can only be changed by deleting the service and recreating it
-// with a new Dnsconfig .
+// that you want Cloud Map to create when you register an instance.
+//
+// The record types of a service can only be changed by deleting the service and
+// recreating it with a new Dnsconfig .
type DnsConfig struct {
// An array that contains one DnsRecord object for each Route 53 DNS record that
@@ -19,40 +20,59 @@ type DnsConfig struct {
// This member is required.
DnsRecords []DnsRecord
- // Use NamespaceId in Service (https://docs.aws.amazon.com/cloud-map/latest/api/API_Service.html)
- // instead. The ID of the namespace to use for DNS configuration.
+ // Use NamespaceId in [Service] instead.
+ //
+ // The ID of the namespace to use for DNS configuration.
+ //
+ // [Service]: https://docs.aws.amazon.com/cloud-map/latest/api/API_Service.html
//
// Deprecated: Top level attribute in request should be used to reference
// namespace-id
NamespaceId *string
// The routing policy that you want to apply to all Route 53 DNS records that
- // Cloud Map creates when you register an instance and specify this service. If you
- // want to use this service to register instances that create alias records,
- // specify WEIGHTED for the routing policy. You can specify the following values:
+ // Cloud Map creates when you register an instance and specify this service.
+ //
+ // If you want to use this service to register instances that create alias
+ // records, specify WEIGHTED for the routing policy.
+ //
+ // You can specify the following values:
+ //
// MULTIVALUE If you define a health check for the service and the health check is
- // healthy, Route 53 returns the applicable value for up to eight instances. For
- // example, suppose that the service includes configurations for one A record and
- // a health check. You use the service to register 10 instances. Route 53 responds
- // to DNS queries with IP addresses for up to eight healthy instances. If fewer
- // than eight instances are healthy, Route 53 responds to every DNS query with the
- // IP addresses for all of the healthy instances. If you don't define a health
- // check for the service, Route 53 assumes that all instances are healthy and
- // returns the values for up to eight instances. For more information about the
- // multivalue routing policy, see Multivalue Answer Routing (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-multivalue)
- // in the Route 53 Developer Guide. WEIGHTED Route 53 returns the applicable value
- // from one randomly selected instance from among the instances that you registered
- // using the same service. Currently, all records have the same weight, so you
- // can't route more or less traffic to any instances. For example, suppose that the
- // service includes configurations for one A record and a health check. You use
- // the service to register 10 instances. Route 53 responds to DNS queries with the
- // IP address for one randomly selected instance from among the healthy instances.
- // If no instances are healthy, Route 53 responds to DNS queries as if all of the
- // instances were healthy. If you don't define a health check for the service,
- // Route 53 assumes that all instances are healthy and returns the applicable value
- // for one randomly selected instance. For more information about the weighted
- // routing policy, see Weighted Routing (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted)
- // in the Route 53 Developer Guide.
+ // healthy, Route 53 returns the applicable value for up to eight instances.
+ //
+ // For example, suppose that the service includes configurations for one A record
+ // and a health check. You use the service to register 10 instances. Route 53
+ // responds to DNS queries with IP addresses for up to eight healthy instances. If
+ // fewer than eight instances are healthy, Route 53 responds to every DNS query
+ // with the IP addresses for all of the healthy instances.
+ //
+ // If you don't define a health check for the service, Route 53 assumes that all
+ // instances are healthy and returns the values for up to eight instances.
+ //
+ // For more information about the multivalue routing policy, see [Multivalue Answer Routing] in the Route 53
+ // Developer Guide.
+ //
+ // WEIGHTED Route 53 returns the applicable value from one randomly selected
+ // instance from among the instances that you registered using the same service.
+ // Currently, all records have the same weight, so you can't route more or less
+ // traffic to any instances.
+ //
+ // For example, suppose that the service includes configurations for one A record
+ // and a health check. You use the service to register 10 instances. Route 53
+ // responds to DNS queries with the IP address for one randomly selected instance
+ // from among the healthy instances. If no instances are healthy, Route 53 responds
+ // to DNS queries as if all of the instances were healthy.
+ //
+ // If you don't define a health check for the service, Route 53 assumes that all
+ // instances are healthy and returns the applicable value for one randomly selected
+ // instance.
+ //
+ // For more information about the weighted routing policy, see [Weighted Routing] in the Route 53
+ // Developer Guide.
+ //
+ // [Weighted Routing]: https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted
+ // [Multivalue Answer Routing]: https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-multivalue
RoutingPolicy RoutingPolicy
noSmithyDocumentSerde
@@ -90,13 +110,15 @@ type DnsProperties struct {
type DnsRecord struct {
// The amount of time, in seconds, that you want DNS resolvers to cache the
- // settings for this record. Alias records don't include a TTL because Route 53
- // uses the TTL for the Amazon Web Services resource that an alias record routes
- // traffic to. If you include the AWS_ALIAS_DNS_NAME attribute when you submit a
- // RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html)
- // request, the TTL value is ignored. Always specify a TTL for the service; you
- // can use a service to register instances that create either alias or non-alias
- // records.
+ // settings for this record.
+ //
+ // Alias records don't include a TTL because Route 53 uses the TTL for the Amazon
+ // Web Services resource that an alias record routes traffic to. If you include the
+ // AWS_ALIAS_DNS_NAME attribute when you submit a [RegisterInstance] request, the TTL value is
+ // ignored. Always specify a TTL for the service; you can use a service to register
+ // instances that create either alias or non-alias records.
+ //
+ // [RegisterInstance]: https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html
//
// This member is required.
TTL *int64
@@ -104,50 +126,82 @@ type DnsRecord struct {
// The type of the resource, which indicates the type of value that Route 53
// returns in response to DNS queries. You can specify values for Type in the
// following combinations:
+ //
// - A
+ //
// - AAAA
+ //
// - A and AAAA
+ //
// - SRV
+ //
// - CNAME
+ //
// If you want Cloud Map to create a Route 53 alias record when you register an
- // instance, specify A or AAAA for Type . You specify other settings, such as the
- // IP address for A and AAAA records, when you register an instance. For more
- // information, see RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html)
- // . The following values are supported: A Route 53 returns the IP address of the
- // resource in IPv4 format, such as 192.0.2.44. AAAA Route 53 returns the IP
- // address of the resource in IPv6 format, such as
- // 2001:0db8:85a3:0000:0000:abcd:0001:2345. CNAME Route 53 returns the domain name
- // of the resource, such as www.example.com. Note the following:
+ // instance, specify A or AAAA for Type .
+ //
+ // You specify other settings, such as the IP address for A and AAAA records, when
+ // you register an instance. For more information, see [RegisterInstance].
+ //
+ // The following values are supported:
+ //
+ // A Route 53 returns the IP address of the resource in IPv4 format, such as
+ // 192.0.2.44.
+ //
+ // AAAA Route 53 returns the IP address of the resource in IPv6 format, such as
+ // 2001:0db8:85a3:0000:0000:abcd:0001:2345.
+ //
+ // CNAME Route 53 returns the domain name of the resource, such as
+ // www.example.com. Note the following:
+ //
// - You specify the domain name that you want to route traffic to when you
- // register an instance. For more information, see Attributes (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html#cloudmap-RegisterInstance-request-Attributes)
- // in the topic RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html)
- // .
+ // register an instance. For more information, see [Attributes]in the topic [RegisterInstance].
+ //
// - You must specify WEIGHTED for the value of RoutingPolicy .
+ //
// - You can't specify both CNAME for Type and settings for HealthCheckConfig .
// If you do, the request will fail with an InvalidInput error.
+ //
// SRV Route 53 returns the value for an SRV record. The value for an SRV record
- // uses the following values: priority weight port service-hostname Note the
- // following about the values:
+ // uses the following values:
+ //
+ // priority weight port service-hostname
+ //
+ // Note the following about the values:
+ //
// - The values of priority and weight are both set to 1 and can't be changed.
+ //
// - The value of port comes from the value that you specify for the
- // AWS_INSTANCE_PORT attribute when you submit a RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html)
- // request.
+ // AWS_INSTANCE_PORT attribute when you submit a [RegisterInstance]request.
+ //
// - The value of service-hostname is a concatenation of the following values:
+ //
// - The value that you specify for InstanceId when you register an instance.
+ //
// - The name of the service.
- // - The name of the namespace. For example, if the value of InstanceId is test ,
- // the name of the service is backend , and the name of the namespace is
- // example.com , the value of service-hostname is the following:
- // test.backend.example.com
+ //
+ // - The name of the namespace.
+ //
+ // For example, if the value of InstanceId is test , the name of the service is
+ // backend , and the name of the namespace is example.com , the value of
+ // service-hostname is the following:
+ //
+ // test.backend.example.com
+ //
// If you specify settings for an SRV record, note the following:
+ //
// - If you specify values for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both in
// the RegisterInstance request, Cloud Map automatically creates A and/or AAAA
// records that have the same name as the value of service-hostname in the SRV
// record. You can ignore these records.
+ //
// - If you're using a system that requires a specific SRV format, such as
- // HAProxy, see the Name (https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html#cloudmap-CreateService-request-Name)
- // element in the documentation about CreateService for information about how to
- // specify the correct name format.
+ // HAProxy, see the [Name]element in the documentation about CreateService for
+ // information about how to specify the correct name format.
+ //
+ // [Attributes]: https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html#cloudmap-RegisterInstance-request-Attributes
+ // [RegisterInstance]: https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html
+ // [Name]: https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html#cloudmap-CreateService-request-Name
//
// This member is required.
Type RecordType
@@ -155,69 +209,95 @@ type DnsRecord struct {
noSmithyDocumentSerde
}
-// Public DNS and HTTP namespaces only. A complex type that contains settings for
+// Public DNS and HTTP namespaces only. A complex type that contains settings for
+//
// an optional health check. If you specify settings for a health check, Cloud Map
-// associates the health check with the records that you specify in DnsConfig . If
-// you specify a health check configuration, you can specify either
-// HealthCheckCustomConfig or HealthCheckConfig but not both. Health checks are
-// basic Route 53 health checks that monitor an Amazon Web Services endpoint. For
-// information about pricing for health checks, see Amazon Route 53 Pricing (http://aws.amazon.com/route53/pricing/)
-// . Note the following about configuring health checks. A and AAAA records If
-// DnsConfig includes configurations for both A and AAAA records, Cloud Map
-// creates a health check that uses the IPv4 address to check the health of the
-// resource. If the endpoint tthat's specified by the IPv4 address is unhealthy,
-// Route 53 considers both the A and AAAA records to be unhealthy. CNAME records
-// You can't specify settings for HealthCheckConfig when the DNSConfig includes
-// CNAME for the value of Type . If you do, the CreateService request will fail
-// with an InvalidInput error. Request interval A Route 53 health checker in each
-// health-checking Amazon Web Services Region sends a health check request to an
-// endpoint every 30 seconds. On average, your endpoint receives a health check
-// request about every two seconds. However, health checkers don't coordinate with
-// one another. Therefore, you might sometimes see several requests in one second
-// that's followed by a few seconds with no health checks at all. Health checking
-// regions Health checkers perform checks from all Route 53 health-checking
-// Regions. For a list of the current Regions, see Regions (https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions)
-// . Alias records When you register an instance, if you include the
+// associates the health check with the records that you specify in DnsConfig .
+//
+// If you specify a health check configuration, you can specify either
+// HealthCheckCustomConfig or HealthCheckConfig but not both.
+//
+// Health checks are basic Route 53 health checks that monitor an Amazon Web
+// Services endpoint. For information about pricing for health checks, see [Amazon Route 53 Pricing].
+//
+// Note the following about configuring health checks.
+//
+// A and AAAA records If DnsConfig includes configurations for both A and AAAA
+// records, Cloud Map creates a health check that uses the IPv4 address to check
+// the health of the resource. If the endpoint that's specified by the IPv4
+// address is unhealthy, Route 53 considers both the A and AAAA records to be
+// unhealthy.
+//
+// CNAME records You can't specify settings for HealthCheckConfig when the
+// DNSConfig includes CNAME for the value of Type . If you do, the CreateService
+// request will fail with an InvalidInput error.
+//
+// Request interval A Route 53 health checker in each health-checking Amazon Web
+// Services Region sends a health check request to an endpoint every 30 seconds. On
+// average, your endpoint receives a health check request about every two seconds.
+// However, health checkers don't coordinate with one another. Therefore, you might
+// sometimes see several requests in one second that's followed by a few seconds
+// with no health checks at all.
+//
+// Health checking regions Health checkers perform checks from all Route 53
+// health-checking Regions. For a list of the current Regions, see [Regions].
+//
+// Alias records When you register an instance, if you include the
// AWS_ALIAS_DNS_NAME attribute, Cloud Map creates a Route 53 alias record. Note
// the following:
+//
// - Route 53 automatically sets EvaluateTargetHealth to true for alias records.
// When EvaluateTargetHealth is true, the alias record inherits the health of the
// referenced Amazon Web Services resource. such as an ELB load balancer. For more
-// information, see EvaluateTargetHealth (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth)
-// .
+// information, see [EvaluateTargetHealth].
+//
// - If you include HealthCheckConfig and then use the service to register an
// instance that creates an alias record, Route 53 doesn't create the health check.
//
// Charges for health checks Health checks are basic Route 53 health checks that
// monitor an Amazon Web Services endpoint. For information about pricing for
-// health checks, see Amazon Route 53 Pricing (http://aws.amazon.com/route53/pricing/)
-// .
+// health checks, see [Amazon Route 53 Pricing].
+//
+// [Regions]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions
+// [Amazon Route 53 Pricing]: http://aws.amazon.com/route53/pricing/
+// [EvaluateTargetHealth]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth
type HealthCheckConfig struct {
// The type of health check that you want to create, which indicates how Route 53
- // determines whether an endpoint is healthy. You can't change the value of Type
- // after you create a health check. You can create the following types of health
- // checks:
+ // determines whether an endpoint is healthy.
+ //
+ // You can't change the value of Type after you create a health check.
+ //
+ // You can create the following types of health checks:
+ //
// - HTTP: Route 53 tries to establish a TCP connection. If successful, Route 53
// submits an HTTP request and waits for an HTTP status code of 200 or greater and
// less than 400.
+ //
// - HTTPS: Route 53 tries to establish a TCP connection. If successful, Route
// 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater
- // and less than 400. If you specify HTTPS for the value of Type , the endpoint
- // must support TLS v1.0 or later.
- // - TCP: Route 53 tries to establish a TCP connection. If you specify TCP for
- // Type , don't specify a value for ResourcePath .
- // For more information, see How Route 53 Determines Whether an Endpoint Is Healthy (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)
- // in the Route 53 Developer Guide.
+ // and less than 400.
+ //
+ // If you specify HTTPS for the value of Type , the endpoint must support TLS v1.0
+ // or later.
+ //
+ // - TCP: Route 53 tries to establish a TCP connection.
+ //
+ // If you specify TCP for Type , don't specify a value for ResourcePath .
+ //
+ // For more information, see [How Route 53 Determines Whether an Endpoint Is Healthy] in the Route 53 Developer Guide.
+ //
+ // [How Route 53 Determines Whether an Endpoint Is Healthy]: https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html
//
// This member is required.
Type HealthCheckType
// The number of consecutive health checks that an endpoint must pass or fail for
// Route 53 to change the current status of the endpoint from unhealthy to healthy
- // or the other way around. For more information, see How Route 53 Determines
- // Whether an Endpoint Is Healthy (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)
- // in the Route 53 Developer Guide.
+ // or the other way around. For more information, see [How Route 53 Determines Whether an Endpoint Is Healthy] in the Route 53 Developer
+ // Guide.
+ //
+ // [How Route 53 Determines Whether an Endpoint Is Healthy]: https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html
FailureThreshold *int32
// The path that you want Route 53 to request when performing health checks. The
@@ -225,8 +305,9 @@ type HealthCheckConfig struct {
// 3xx format for when the endpoint is healthy. An example file is
// /docs/route53-health-check.html . Route 53 automatically adds the DNS name for
// the service. If you don't specify a value for ResourcePath , the default value
- // is / . If you specify TCP for Type , you must not specify a value for
- // ResourcePath .
+ // is / .
+ //
+ // If you specify TCP for Type , you must not specify a value for ResourcePath .
ResourcePath *string
noSmithyDocumentSerde
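A rough usage sketch of the constraints documented above (an HTTP check may set ResourcePath, while a TCP check must omit it), assuming the upstream module path github.com/aws/aws-sdk-go-v2/service/servicediscovery, its generated enum names, and placeholder namespace and service names:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := servicediscovery.NewFromConfig(cfg)

	// HTTP health check: ResourcePath is allowed here; with Type TCP it must be omitted.
	_, err = client.CreateService(ctx, &servicediscovery.CreateServiceInput{
		Name:        aws.String("web"),                 // placeholder service name
		NamespaceId: aws.String("ns-examplenamespace"), // placeholder namespace ID
		DnsConfig: &types.DnsConfig{
			RoutingPolicy: types.RoutingPolicyWeighted,
			DnsRecords:    []types.DnsRecord{{Type: types.RecordTypeA, TTL: aws.Int64(60)}},
		},
		HealthCheckConfig: &types.HealthCheckConfig{
			Type:             types.HealthCheckTypeHttp,
			ResourcePath:     aws.String("/docs/route53-health-check.html"),
			FailureThreshold: aws.Int32(3),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}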
@@ -236,28 +317,41 @@ type HealthCheckConfig struct {
// A custom health check, which requires that you use a third-party health checker
// to evaluate the health of your resources, is useful in the following
// circumstances:
+//
// - You can't use a health check that's defined by HealthCheckConfig because the
// resource isn't available over the internet. For example, you can use a custom
// health check when the instance is in an Amazon VPC. (To check the health of
// resources in a VPC, the health checker must also be in the VPC.)
+//
// - You want to use a third-party health checker regardless of where your
// resources are located.
//
// If you specify a health check configuration, you can specify either
-// HealthCheckCustomConfig or HealthCheckConfig but not both. To change the status
-// of a custom health check, submit an UpdateInstanceCustomHealthStatus request.
-// Cloud Map doesn't monitor the status of the resource, it just keeps a record of
-// the status specified in the most recent UpdateInstanceCustomHealthStatus
-// request. Here's how custom health checks work:
+// HealthCheckCustomConfig or HealthCheckConfig but not both.
+//
+// To change the status of a custom health check, submit an
+// UpdateInstanceCustomHealthStatus request. Cloud Map doesn't monitor the status
+// of the resource; it just keeps a record of the status specified in the most
+// recent UpdateInstanceCustomHealthStatus request.
+//
+// Here's how custom health checks work:
+//
// - You create a service.
+//
// - You register an instance.
+//
// - You configure a third-party health checker to monitor the resource that's
-// associated with the new instance. Cloud Map doesn't check the health of the
-// resource directly.
+// associated with the new instance.
+//
+// Cloud Map doesn't check the health of the resource directly.
+//
// - The third-party health-checker determines that the resource is unhealthy
// and notifies your application.
+//
// - Your application submits an UpdateInstanceCustomHealthStatus request.
+//
// - Cloud Map waits for 30 seconds.
+//
// - If another UpdateInstanceCustomHealthStatus request doesn't arrive during
// that time to change the status back to healthy, Cloud Map stops routing traffic
// to the resource.
@@ -265,13 +359,15 @@ type HealthCheckCustomConfig struct {
// This parameter is no longer supported and is always set to 1. Cloud Map waits
// for approximately 30 seconds after receiving an UpdateInstanceCustomHealthStatus
- // request before changing the status of the service instance. The number of
- // 30-second intervals that you want Cloud Map to wait after receiving an
- // UpdateInstanceCustomHealthStatus request before it changes the health status of
- // a service instance. Sending a second or subsequent
- // UpdateInstanceCustomHealthStatus request with the same value before 30 seconds
- // has passed doesn't accelerate the change. Cloud Map still waits 30 seconds
- // after the first request to make the change.
+ // request before changing the status of the service instance.
+ //
+ // The number of 30-second intervals that you want Cloud Map to wait after
+ // receiving an UpdateInstanceCustomHealthStatus request before it changes the
+ // health status of a service instance.
+ //
+ // Sending a second or subsequent UpdateInstanceCustomHealthStatus request with
+ // the same value before 30 seconds has passed doesn't accelerate the change. Cloud
+ // Map still waits 30 seconds after the first request to make the change.
//
// Deprecated: Configurable FailureThreshold of HealthCheckCustomConfig is
// deprecated. It will always have value 1.
@@ -280,9 +376,10 @@ type HealthCheckCustomConfig struct {
noSmithyDocumentSerde
}
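The custom health check flow above ends with the application calling UpdateInstanceCustomHealthStatus; a minimal sketch of that call, assuming the upstream servicediscovery client package and placeholder IDs:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// markUnhealthy reports a custom health check result for one instance. Cloud Map
// only records the status; it waits roughly 30 seconds before acting on it.
func markUnhealthy(ctx context.Context, client *servicediscovery.Client, serviceID, instanceID string) error {
	_, err := client.UpdateInstanceCustomHealthStatus(ctx, &servicediscovery.UpdateInstanceCustomHealthStatusInput{
		ServiceId:  aws.String(serviceID),
		InstanceId: aws.String(instanceID),
		Status:     types.CustomHealthStatusUnhealthy,
	})
	return err
}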
-// In a response to a DiscoverInstances (https://docs.aws.amazon.com/cloud-map/latest/api/API_DiscoverInstances.html)
-// request, HttpInstanceSummary contains information about one instance that
-// matches the values that you specified in the request.
+// In a response to a [DiscoverInstances] request, HttpInstanceSummary contains information about one
+// instance that matches the values that you specified in the request.
+//
+// [DiscoverInstances]: https://docs.aws.amazon.com/cloud-map/latest/api/API_DiscoverInstances.html
type HttpInstanceSummary struct {
// If you included any attributes when you registered the instance, the values of
@@ -331,70 +428,106 @@ type HttpProperties struct {
type Instance struct {
// An identifier that you want to associate with the instance. Note the following:
+ //
// - If the service that's specified by ServiceId includes settings for an SRV
// record, the value of InstanceId is automatically included as part of the value
- // for the SRV record. For more information, see DnsRecord > Type (https://docs.aws.amazon.com/cloud-map/latest/api/API_DnsRecord.html#cloudmap-Type-DnsRecord-Type)
- // .
+ // for the SRV record. For more information, see [DnsRecord > Type].
+ //
// - You can use this value to update an existing instance.
+ //
// - To register a new instance, you must specify a value that's unique among
// instances that you register by using the same service.
+ //
// - If you specify an existing InstanceId and ServiceId , Cloud Map updates the
// existing DNS records. If there's also an existing health check, Cloud Map
- // deletes the old health check and creates a new one. The health check isn't
- // deleted immediately, so it will still appear for a while if you submit a
- // ListHealthChecks request, for example.
+ // deletes the old health check and creates a new one.
+ //
+ // The health check isn't deleted immediately, so it will still appear for a while
+ // if you submit a ListHealthChecks request, for example.
+ //
+ // [DnsRecord > Type]: https://docs.aws.amazon.com/cloud-map/latest/api/API_DnsRecord.html#cloudmap-Type-DnsRecord-Type
//
// This member is required.
Id *string
// A string map that contains the following information for the service that you
// specify in ServiceId :
+ //
// - The attributes that apply to the records that are defined in the service.
+ //
// - For each attribute, the applicable value.
+ //
// Do not include sensitive information in the attributes if the namespace is
- // discoverable by public DNS queries. Supported attribute keys include the
- // following: AWS_ALIAS_DNS_NAME If you want Cloud Map to create a Route 53 alias
- // record that routes traffic to an Elastic Load Balancing load balancer, specify
- // the DNS name that's associated with the load balancer. For information about how
- // to get the DNS name, see AliasTarget->DNSName (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-DNSName)
- // in the Route 53 API Reference. Note the following:
+ // discoverable by public DNS queries.
+ //
+ // Supported attribute keys include the following:
+ //
+ // AWS_ALIAS_DNS_NAME If you want Cloud Map to create a Route 53 alias record that
+ // routes traffic to an Elastic Load Balancing load balancer, specify the DNS name
+ // that's associated with the load balancer. For information about how to get the
+ // DNS name, see [AliasTarget->DNSName] in the Route 53 API Reference.
+ //
+ // Note the following:
+ //
// - The configuration for the service that's specified by ServiceId must include
// settings for an A record, an AAAA record, or both.
+ //
// - In the service that's specified by ServiceId , the value of RoutingPolicy
// must be WEIGHTED .
+ //
// - If the service that's specified by ServiceId includes HealthCheckConfig
// settings, Cloud Map creates the health check, but it won't associate the health
// check with the alias record.
+ //
// - Auto naming currently doesn't support creating alias records that route
// traffic to Amazon Web Services resources other than ELB load balancers.
+ //
// - If you specify a value for AWS_ALIAS_DNS_NAME , don't specify values for any
// of the AWS_INSTANCE attributes.
- // AWS_EC2_INSTANCE_ID HTTP namespaces only. The Amazon EC2 instance ID for the
+ //
+ // AWS_EC2_INSTANCE_ID HTTP namespaces only. The Amazon EC2 instance ID for the
// instance. The AWS_INSTANCE_IPV4 attribute contains the primary private IPv4
- // address. AWS_INIT_HEALTH_STATUS If the service configuration includes
+ // address.
+ //
+ // AWS_INIT_HEALTH_STATUS If the service configuration includes
// HealthCheckCustomConfig , you can optionally use AWS_INIT_HEALTH_STATUS to
// specify the initial status of the custom health check, HEALTHY or UNHEALTHY . If
// you don't specify a value for AWS_INIT_HEALTH_STATUS , the initial status is
- // HEALTHY . AWS_INSTANCE_CNAME If the service configuration includes a CNAME
- // record, the domain name that you want Route 53 to return in response to DNS
- // queries (for example, example.com ). This value is required if the service
- // specified by ServiceId includes settings for an CNAME record. AWS_INSTANCE_IPV4
- // If the service configuration includes an A record, the IPv4 address that you
- // want Route 53 to return in response to DNS queries (for example, 192.0.2.44 ).
+ // HEALTHY .
+ //
+ // AWS_INSTANCE_CNAME If the service configuration includes a CNAME record, the
+ // domain name that you want Route 53 to return in response to DNS queries (for
+ // example, example.com ).
+ //
+ // This value is required if the service specified by ServiceId includes settings
+ // for a CNAME record.
+ //
+ // AWS_INSTANCE_IPV4 If the service configuration includes an A record, the IPv4
+ // address that you want Route 53 to return in response to DNS queries (for
+ // example, 192.0.2.44 ).
+ //
// This value is required if the service specified by ServiceId includes settings
// for an A record. If the service includes settings for an SRV record, you must
// specify a value for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both.
+ //
// AWS_INSTANCE_IPV6 If the service configuration includes an AAAA record, the
// IPv6 address that you want Route 53 to return in response to DNS queries (for
- // example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 ). This value is required if
- // the service specified by ServiceId includes settings for an AAAA record. If the
- // service includes settings for an SRV record, you must specify a value for
- // AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both. AWS_INSTANCE_PORT If the
- // service includes an SRV record, the value that you want Route 53 to return for
- // the port. If the service includes HealthCheckConfig , the port on the endpoint
- // that you want Route 53 to send requests to. This value is required if you
- // specified settings for an SRV record or a Route 53 health check when you
- // created the service.
+ // example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 ).
+ //
+ // This value is required if the service specified by ServiceId includes settings
+ // for an AAAA record. If the service includes settings for an SRV record, you
+ // must specify a value for AWS_INSTANCE_IPV4 , AWS_INSTANCE_IPV6 , or both.
+ //
+ // AWS_INSTANCE_PORT If the service includes an SRV record, the value that you
+ // want Route 53 to return for the port.
+ //
+ // If the service includes HealthCheckConfig , the port on the endpoint that you
+ // want Route 53 to send requests to.
+ //
+ // This value is required if you specified settings for an SRV record or a Route
+ // 53 health check when you created the service.
+ //
+ // [AliasTarget->DNSName]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-DNSName
Attributes map[string]string
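The attribute keys listed above are passed to RegisterInstance as a plain string map; a hedged sketch with placeholder IDs and values, assuming the upstream servicediscovery package:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
)

// registerWebInstance registers one endpoint using the attribute keys described above.
func registerWebInstance(ctx context.Context, client *servicediscovery.Client) error {
	_, err := client.RegisterInstance(ctx, &servicediscovery.RegisterInstanceInput{
		ServiceId:  aws.String("srv-exampleservice"),  // placeholder service ID
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder instance ID
		Attributes: map[string]string{
			"AWS_INSTANCE_IPV4":      "192.0.2.44", // required because the service defines an A record
			"AWS_INSTANCE_PORT":      "8080",       // required for an SRV record or a Route 53 health check
			"AWS_INIT_HEALTH_STATUS": "HEALTHY",    // only meaningful with HealthCheckCustomConfig
		},
	})
	return err
}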
// A unique string that identifies the request and that allows failed
@@ -413,27 +546,38 @@ type Instance struct {
type InstanceSummary struct {
// A string map that contains the following information:
+ //
// - The attributes that are associated with the instance.
+ //
// - For each attribute, the applicable value.
- // Supported attribute keys include the following: AWS_ALIAS_DNS_NAME For an alias
- // record that routes traffic to an Elastic Load Balancing load balancer, the DNS
- // name that's associated with the load balancer. AWS_EC2_INSTANCE_ID (HTTP
- // namespaces only) The Amazon EC2 instance ID for the instance. When the
- // AWS_EC2_INSTANCE_ID attribute is specified, then the AWS_INSTANCE_IPV4
- // attribute contains the primary private IPv4 address. AWS_INIT_HEALTH_STATUS If
- // the service configuration includes HealthCheckCustomConfig , you can optionally
- // use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health
- // check, HEALTHY or UNHEALTHY . If you don't specify a value for
- // AWS_INIT_HEALTH_STATUS , the initial status is HEALTHY . AWS_INSTANCE_CNAME For
- // a CNAME record, the domain name that Route 53 returns in response to DNS
- // queries (for example, example.com ). AWS_INSTANCE_IPV4 For an A record, the
- // IPv4 address that Route 53 returns in response to DNS queries (for example,
- // 192.0.2.44 ). AWS_INSTANCE_IPV6 For an AAAA record, the IPv6 address that Route
- // 53 returns in response to DNS queries (for example,
- // 2001:0db8:85a3:0000:0000:abcd:0001:2345 ). AWS_INSTANCE_PORT For an SRV record,
- // the value that Route 53 returns for the port. In addition, if the service
- // includes HealthCheckConfig , the port on the endpoint that Route 53 sends
- // requests to.
+ //
+ // Supported attribute keys include the following:
+ //
+ // AWS_ALIAS_DNS_NAME For an alias record that routes traffic to an Elastic Load
+ // Balancing load balancer, the DNS name that's associated with the load balancer.
+ //
+ // AWS_EC2_INSTANCE_ID (HTTP namespaces only) The Amazon EC2 instance ID for the
+ // instance. When the AWS_EC2_INSTANCE_ID attribute is specified, then the
+ // AWS_INSTANCE_IPV4 attribute contains the primary private IPv4 address.
+ //
+ // AWS_INIT_HEALTH_STATUS If the service configuration includes
+ // HealthCheckCustomConfig , you can optionally use AWS_INIT_HEALTH_STATUS to
+ // specify the initial status of the custom health check, HEALTHY or UNHEALTHY . If
+ // you don't specify a value for AWS_INIT_HEALTH_STATUS , the initial status is
+ // HEALTHY .
+ //
+ // AWS_INSTANCE_CNAME For a CNAME record, the domain name that Route 53 returns in
+ // response to DNS queries (for example, example.com ).
+ //
+ // AWS_INSTANCE_IPV4 For an A record, the IPv4 address that Route 53 returns in
+ // response to DNS queries (for example, 192.0.2.44 ).
+ //
+ // AWS_INSTANCE_IPV6 For an AAAA record, the IPv6 address that Route 53 returns in
+ // response to DNS queries (for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345 ).
+ //
+ // AWS_INSTANCE_PORT For an SRV record, the value that Route 53 returns for the
+ // port. In addition, if the service includes HealthCheckConfig , the port on the
+ // endpoint that Route 53 sends requests to.
Attributes map[string]string
// The ID for an instance that you created by using a specified service.
@@ -476,9 +620,14 @@ type Namespace struct {
ServiceCount *int32
// The type of the namespace. The methods for discovering instances depends on the
- // value that you specify: HTTP Instances can be discovered only programmatically,
- // using the Cloud Map DiscoverInstances API. DNS_PUBLIC Instances can be
- // discovered using public DNS queries and using the DiscoverInstances API.
+ // value that you specify:
+ //
+ // HTTP Instances can be discovered only programmatically, using the Cloud Map
+ // DiscoverInstances API.
+ //
+ // DNS_PUBLIC Instances can be discovered using public DNS queries and using the
+ // DiscoverInstances API.
+ //
// DNS_PRIVATE Instances can be discovered using DNS queries in VPCs and using the
// DiscoverInstances API.
Type NamespaceType
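Since HTTP namespaces are discoverable only through the DiscoverInstances API, a short sketch of that call (upstream servicediscovery package assumed, names are placeholders):

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// discoverHealthy lists healthy instances programmatically, the only discovery
// path available for HTTP namespaces.
func discoverHealthy(ctx context.Context, client *servicediscovery.Client) error {
	out, err := client.DiscoverInstances(ctx, &servicediscovery.DiscoverInstancesInput{
		NamespaceName: aws.String("example.local"), // placeholder namespace
		ServiceName:   aws.String("web"),           // placeholder service
		HealthStatus:  types.HealthStatusFilterHealthy,
	})
	if err != nil {
		return err
	}
	for _, inst := range out.Instances {
		fmt.Println(aws.ToString(inst.InstanceId), inst.Attributes["AWS_INSTANCE_IPV4"])
	}
	return nil
}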
@@ -491,16 +640,22 @@ type Namespace struct {
type NamespaceFilter struct {
// Specify the namespaces that you want to get using one of the following.
+ //
// - TYPE : Gets the namespaces of the specified type.
+ //
// - NAME : Gets the namespaces with the specified name.
+ //
// - HTTP_NAME : Gets the namespaces with the specified HTTP name.
//
// This member is required.
Name NamespaceFilterName
// Specify the values that are applicable to the value that you specify for Name .
+ //
// - TYPE : Specify HTTP , DNS_PUBLIC , or DNS_PRIVATE .
+ //
// - NAME : Specify the name of the namespace, which is found in Namespace.Name .
+ //
// - HTTP_NAME : Specify the HTTP name of the namespace, which is found in
// Namespace.Properties.HttpProperties.HttpName .
//
@@ -510,9 +665,11 @@ type NamespaceFilter struct {
// Specify the operator that you want to use to determine whether a namespace
// matches the specified value. Valid values for Condition are one of the
// following.
+ //
// - EQ : When you specify EQ for Condition , you can specify only one value. EQ
// is supported for TYPE , NAME , and HTTP_NAME . EQ is the default condition and
// can be omitted.
+ //
// - BEGINS_WITH : When you specify BEGINS_WITH for Condition , you can specify
// only one value. BEGINS_WITH is supported for TYPE , NAME , and HTTP_NAME .
Condition FilterCondition
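A hedged sketch of the NamespaceFilter fields described above, filtering ListNamespaces by TYPE with the default EQ condition (upstream servicediscovery package assumed):

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// listPrivateDNSNamespaces returns only DNS_PRIVATE namespaces.
func listPrivateDNSNamespaces(ctx context.Context, client *servicediscovery.Client) (*servicediscovery.ListNamespacesOutput, error) {
	return client.ListNamespaces(ctx, &servicediscovery.ListNamespacesInput{
		Filters: []types.NamespaceFilter{{
			Name:      types.NamespaceFilterNameType,
			Values:    []string{"DNS_PRIVATE"},
			Condition: types.FilterConditionEq, // EQ is the default and could be omitted
		}},
	})
}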
@@ -576,12 +733,19 @@ type Operation struct {
// The code associated with ErrorMessage . Values for ErrorCode include the
// following:
+ //
// - ACCESS_DENIED
+ //
// - CANNOT_CREATE_HOSTED_ZONE
+ //
// - EXPIRED_TOKEN
+ //
// - HOSTED_ZONE_NOT_FOUND
+ //
// - INTERNAL_FAILURE
+ //
// - INVALID_CHANGE_BATCH
+ //
// - THROTTLED_REQUEST
ErrorCode *string
@@ -591,16 +755,25 @@ type Operation struct {
// The ID of the operation that you want to get information about.
Id *string
- // The status of the operation. Values include the following: SUBMITTED This is
- // the initial state that occurs immediately after you submit a request. PENDING
- // Cloud Map is performing the operation. SUCCESS The operation succeeded. FAIL The
- // operation failed. For the failure reason, see ErrorMessage .
+ // The status of the operation. Values include the following:
+ //
+ // SUBMITTED This is the initial state that occurs immediately after you submit a
+ // request.
+ //
+ // PENDING Cloud Map is performing the operation.
+ //
+ // SUCCESS The operation succeeded.
+ //
+ // FAIL The operation failed. For the failure reason, see ErrorMessage .
Status OperationStatus
- // The name of the target entity that's associated with the operation: NAMESPACE
- // The namespace ID is returned in the ResourceId property. SERVICE The service ID
- // is returned in the ResourceId property. INSTANCE The instance ID is returned in
- // the ResourceId property.
+ // The name of the target entity that's associated with the operation:
+ //
+ // NAMESPACE The namespace ID is returned in the ResourceId property.
+ //
+ // SERVICE The service ID is returned in the ResourceId property.
+ //
+ // INSTANCE The instance ID is returned in the ResourceId property.
Targets map[string]string
// The name of the operation that's associated with the specified ID.
@@ -619,11 +792,16 @@ type Operation struct {
type OperationFilter struct {
// Specify the operations that you want to get:
+ //
// - NAMESPACE_ID: Gets operations related to specified namespaces.
+ //
// - SERVICE_ID: Gets operations related to specified services.
+ //
// - STATUS: Gets operations based on the status of the operations: SUBMITTED ,
// PENDING , SUCCEED , or FAIL .
+ //
// - TYPE: Gets specified types of operation.
+ //
// - UPDATE_DATE: Gets operations that changed status during a specified
// date/time range.
//
@@ -631,13 +809,18 @@ type OperationFilter struct {
Name OperationFilterName
// Specify values that are applicable to the value that you specify for Name :
+ //
// - NAMESPACE_ID: Specify one namespace ID.
+ //
// - SERVICE_ID: Specify one service ID.
+ //
// - STATUS: Specify one or more statuses: SUBMITTED , PENDING , SUCCEED , or
// FAIL .
+ //
// - TYPE: Specify one or more of the following types: CREATE_NAMESPACE ,
// DELETE_NAMESPACE , UPDATE_SERVICE , REGISTER_INSTANCE , or DEREGISTER_INSTANCE
// .
+ //
// - UPDATE_DATE: Specify a start date and an end date in Unix date/time format
// and Coordinated Universal Time (UTC). The start date must be the first value.
//
@@ -646,12 +829,15 @@ type OperationFilter struct {
// The operator that you want to use to determine whether an operation matches the
// specified value. Valid values for condition include:
+ //
// - EQ : When you specify EQ for the condition, you can specify only one value.
// EQ is supported for NAMESPACE_ID , SERVICE_ID , STATUS , and TYPE . EQ is the
// default condition and can be omitted.
+ //
// - IN : When you specify IN for the condition, you can specify a list of one or
// more values. IN is supported for STATUS and TYPE . An operation must match one
// of the specified values to be returned in the response.
+ //
// - BETWEEN : Specify a start date and an end date in Unix date/time format and
// Coordinated Universal Time (UTC). The start date must be the first value.
// BETWEEN is supported for UPDATE_DATE .
@@ -661,18 +847,22 @@ type OperationFilter struct {
}
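The OperationFilter rules above (IN is valid for STATUS and TYPE) translate into a ListOperations call like the following sketch (upstream servicediscovery package assumed):

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// listInFlightOperations returns operations that are still SUBMITTED or PENDING.
func listInFlightOperations(ctx context.Context, client *servicediscovery.Client) (*servicediscovery.ListOperationsOutput, error) {
	return client.ListOperations(ctx, &servicediscovery.ListOperationsInput{
		Filters: []types.OperationFilter{{
			Name:      types.OperationFilterNameStatus,
			Values:    []string{"SUBMITTED", "PENDING"},
			Condition: types.FilterConditionIn,
		}},
	})
}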
// A complex type that contains information about an operation that matches the
-// criteria that you specified in a ListOperations (https://docs.aws.amazon.com/cloud-map/latest/api/API_ListOperations.html)
-// request.
+// criteria that you specified in a [ListOperations] request.
+//
+// [ListOperations]: https://docs.aws.amazon.com/cloud-map/latest/api/API_ListOperations.html
type OperationSummary struct {
// The ID for an operation.
Id *string
// The status of the operation. Values include the following:
- // - SUBMITTED: This is the initial state immediately after you submit a
- // request.
+ //
+ // - SUBMITTED: This is the initial state immediately after you submit a request.
+ //
// - PENDING: Cloud Map is performing the operation.
+ //
// - SUCCESS: The operation succeeded.
+ //
// - FAIL: The operation failed. For the failure reason, see ErrorMessage .
Status OperationStatus
@@ -817,19 +1007,23 @@ type Service struct {
Description *string
// A complex type that contains information about the Route 53 DNS records that
- // you want Cloud Map to create when you register an instance. The record types of
- // a service can only be changed by deleting the service and recreating it with a
- // new Dnsconfig .
+ // you want Cloud Map to create when you register an instance.
+ //
+ // The record types of a service can only be changed by deleting the service and
+ // recreating it with a new DnsConfig .
DnsConfig *DnsConfig
- // Public DNS and HTTP namespaces only. A complex type that contains settings for
+ // Public DNS and HTTP namespaces only. A complex type that contains settings for
// an optional health check. If you specify settings for a health check, Cloud Map
- // associates the health check with the records that you specify in DnsConfig . For
- // information about the charges for health checks, see Amazon Route 53 Pricing (http://aws.amazon.com/route53/pricing/)
- // .
+ // associates the health check with the records that you specify in DnsConfig .
+ //
+ // For information about the charges for health checks, see [Amazon Route 53 Pricing].
+ //
+ // [Amazon Route 53 Pricing]: http://aws.amazon.com/route53/pricing/
HealthCheckConfig *HealthCheckConfig
// A complex type that contains information about an optional custom health check.
+ //
// If you specify a health check configuration, you can specify either
// HealthCheckCustomConfig or HealthCheckConfig but not both.
HealthCheckCustomConfig *HealthCheckCustomConfig
@@ -850,9 +1044,14 @@ type Service struct {
NamespaceId *string
// Describes the systems that can be used to discover the service instances.
- // DNS_HTTP The service instances can be discovered using either DNS queries or the
- // DiscoverInstances API operation. HTTP The service instances can only be
- // discovered using the DiscoverInstances API operation. DNS Reserved.
+ //
+ // DNS_HTTP The service instances can be discovered using either DNS queries or
+ // the DiscoverInstances API operation.
+ //
+ // HTTP The service instances can only be discovered using the DiscoverInstances
+ // API operation.
+ //
+ // DNS Reserved.
Type ServiceType
noSmithyDocumentSerde
@@ -868,7 +1067,7 @@ type ServiceChange struct {
// when you register an instance.
DnsConfig *DnsConfigChange
- // Public DNS and HTTP namespaces only. Settings for an optional health check. If
+ // Public DNS and HTTP namespaces only. Settings for an optional health check. If
// you specify settings for a health check, Cloud Map associates the health check
// with the records that you specify in DnsConfig .
HealthCheckConfig *HealthCheckConfig
@@ -893,6 +1092,7 @@ type ServiceFilter struct {
// The operator that you want to use to determine whether a service is returned by
// ListServices . Valid values for Condition include the following:
+ //
// - EQ : When you specify EQ , specify one namespace ID for Values . EQ is the
// default condition and can be omitted.
Condition FilterCondition
@@ -917,7 +1117,7 @@ type ServiceSummary struct {
// when you register an instance.
DnsConfig *DnsConfig
- // Public DNS and HTTP namespaces only. Settings for an optional health check. If
+ // Public DNS and HTTP namespaces only. Settings for an optional health check. If
// you specify settings for a health check, Cloud Map associates the health check
// with the records that you specify in DnsConfig .
HealthCheckConfig *HealthCheckConfig
@@ -925,12 +1125,15 @@ type ServiceSummary struct {
// Information about an optional custom health check. A custom health check, which
// requires that you use a third-party health checker to evaluate the health of
// your resources, is useful in the following circumstances:
+ //
// - You can't use a health check that's defined by HealthCheckConfig because the
// resource isn't available over the internet. For example, you can use a custom
// health check when the instance is in an Amazon VPC. (To check the health of
// resources in a VPC, the health checker must also be in the VPC.)
+ //
// - You want to use a third-party health checker regardless of where your
// resources are located.
+ //
// If you specify a health check configuration, you can specify either
// HealthCheckCustomConfig or HealthCheckConfig but not both.
HealthCheckCustomConfig *HealthCheckCustomConfig
@@ -948,9 +1151,14 @@ type ServiceSummary struct {
Name *string
// Describes the systems that can be used to discover the service instances.
- // DNS_HTTP The service instances can be discovered using either DNS queries or the
- // DiscoverInstances API operation. HTTP The service instances can only be
- // discovered using the DiscoverInstances API operation. DNS Reserved.
+ //
+ // DNS_HTTP The service instances can be discovered using either DNS queries or
+ // the DiscoverInstances API operation.
+ //
+ // HTTP The service instances can only be discovered using the DiscoverInstances
+ // API operation.
+ //
+ // DNS Reserved.
Type ServiceType
noSmithyDocumentSerde
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
index 4a3e25ac1f..d477f4212f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
@@ -1,3 +1,19 @@
+# v1.20.9 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.20.8 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.7 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.6 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
# v1.20.5 (2024-04-05)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
index 4b21e8b00a..44ad9ff1d2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
@@ -30,9 +30,10 @@ func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredenti
type GetRoleCredentialsInput struct {
- // The token issued by the CreateToken API call. For more information, see
- // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
//
// This member is required.
AccessToken *string
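A minimal sketch of exchanging the CreateToken access token for role credentials via GetRoleCredentials; the account ID and role name are placeholders:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

// fetchRoleCredentials trades an IAM Identity Center access token for temporary
// credentials for one account/role pair.
func fetchRoleCredentials(ctx context.Context, client *sso.Client, accessToken string) (*sso.GetRoleCredentialsOutput, error) {
	return client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
		AccessToken: aws.String(accessToken),
		AccountId:   aws.String("123456789012"),   // placeholder account ID
		RoleName:    aws.String("ReadOnlyAccess"), // placeholder permission-set role
	})
}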
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
index e44da697c5..5861c9bbcc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
@@ -29,9 +29,10 @@ func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesI
type ListAccountRolesInput struct {
- // The token issued by the CreateToken API call. For more information, see
- // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
//
// This member is required.
AccessToken *string
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
index 2d7add067f..7f2b239787 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
@@ -12,9 +12,10 @@ import (
)
// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by
-// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers)
-// in the IAM Identity Center User Guide. This operation returns a paginated
-// response.
+// the administrator of the account. For more information, see [Assign User Access] in the IAM Identity
+// Center User Guide. This operation returns a paginated response.
+//
+// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers
func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) {
if params == nil {
params = &ListAccountsInput{}
@@ -32,9 +33,10 @@ func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, op
type ListAccountsInput struct {
- // The token issued by the CreateToken API call. For more information, see
- // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
//
// This member is required.
AccessToken *string
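Because ListAccounts returns a paginated response, callers typically drive it through the generated paginator; a hedged sketch assuming sso.NewListAccountsPaginator is available in this module version:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

// printAssignedAccounts walks every page of the ListAccounts response.
func printAssignedAccounts(ctx context.Context, client *sso.Client, accessToken string) error {
	p := sso.NewListAccountsPaginator(client, &sso.ListAccountsInput{AccessToken: aws.String(accessToken)})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, acct := range page.AccountList {
			fmt.Println(aws.ToString(acct.AccountId), aws.ToString(acct.AccountName))
		}
	}
	return nil
}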
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
index 3ee682d19e..65f582a874 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
@@ -12,16 +12,20 @@ import (
// Removes the locally stored SSO tokens from the client-side cache and sends an
// API call to the IAM Identity Center service to invalidate the corresponding
-// server-side IAM Identity Center sign in session. If a user uses IAM Identity
-// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is
-// used to obtain an IAM session, as specified in the corresponding IAM Identity
-// Center permission set. More specifically, IAM Identity Center assumes an IAM
-// role in the target account on behalf of the user, and the corresponding
-// temporary AWS credentials are returned to the client. After user logout, any
-// existing IAM role sessions that were created by using IAM Identity Center
-// permission sets continue based on the duration configured in the permission set.
-// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html)
-// in the IAM Identity Center User Guide.
+// server-side IAM Identity Center sign in session.
+//
+// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM
+// Identity Center sign in session is used to obtain an IAM session, as specified
+// in the corresponding IAM Identity Center permission set. More specifically, IAM
+// Identity Center assumes an IAM role in the target account on behalf of the user,
+// and the corresponding temporary AWS credentials are returned to the client.
+//
+// After user logout, any existing IAM role sessions that were created by using
+// IAM Identity Center permission sets continue based on the duration configured in
+// the permission set. For more information, see [User authentications] in the IAM Identity Center User
+// Guide.
+//
+// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html
func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) {
if params == nil {
params = &LogoutInput{}
@@ -39,9 +43,10 @@ func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func
type LogoutInput struct {
- // The token issued by the CreateToken API call. For more information, see
- // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
//
// This member is required.
AccessToken *string
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
index 8bba205f43..d6297fa6a1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
@@ -13,12 +13,22 @@ import (
smithyio "github.com/aws/smithy-go/io"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
+ smithytime "github.com/aws/smithy-go/time"
smithyhttp "github.com/aws/smithy-go/transport/http"
"io"
"io/ioutil"
"strings"
+ "time"
)
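+
+// deserializeS3Expires parses an HTTP-date header value. On a parse failure it
+// returns (nil, nil), so an unparsable timestamp is treated as absent rather
+// than failing deserialization of the whole response.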
+func deserializeS3Expires(v string) (*time.Time, error) {
+ t, err := smithytime.ParseHTTPDate(v)
+ if err != nil {
+ return nil, nil
+ }
+ return &t, nil
+}
+
type awsRestjson1_deserializeOpGetRoleCredentials struct {
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
index 59456d5dc2..7f6e429fda 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
@@ -6,16 +6,22 @@
// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
// service that makes it easy for you to assign user access to IAM Identity Center
// resources such as the AWS access portal. Users can get AWS account applications
-// and roles assigned to them and get federated into the application. Although AWS
-// Single Sign-On was renamed, the sso and identitystore API namespaces will
-// continue to retain their original name for backward compatibility purposes. For
-// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed)
-// . This reference guide describes the IAM Identity Center Portal operations that
+// and roles assigned to them and get federated into the application.
+//
+// Although AWS Single Sign-On was renamed, the sso and identitystore API
+// namespaces will continue to retain their original name for backward
+// compatibility purposes. For more information, see [IAM Identity Center rename].
+//
+// This reference guide describes the IAM Identity Center Portal operations that
+// you can call programmatically and includes detailed information on data types and
-// errors. AWS provides SDKs that consist of libraries and sample code for various
+// errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various
// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android.
// The SDKs provide a convenient way to create programmatic access to IAM Identity
// Center and other AWS services. For more information about the AWS SDKs,
-// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/)
-// .
+// including how to download and install them, see [Tools for Amazon Web Services].
+//
+// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/
+// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed
package sso
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
index 44379817e8..e9adaf46aa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -3,4 +3,4 @@
package sso
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.20.5"
+const goModuleVersion = "1.20.9"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
index 5dee7e53f4..3561c44308 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
@@ -50,8 +50,10 @@ type Options struct {
// Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
// value for this field will likely prevent you from using any endpoint-related
// service features released after the introduction of EndpointResolverV2 and
- // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom
- // endpoint, set the client option BaseEndpoint instead.
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
EndpointResolver EndpointResolver
// Resolves the endpoint used for a particular service operation. This should be
@@ -70,17 +72,20 @@ type Options struct {
// RetryMaxAttempts specifies the maximum number attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. If specified in an operation call's
- // functional options with a value that is different than the constructed client's
- // Options, the Client's Retryer will be wrapped to use the operation's specific
- // RetryMaxAttempts value.
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
RetryMaxAttempts int
// RetryMode specifies the retry mode the API client will be created with, if
- // Retryer option is not also specified. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. Currently does not support per operation call
- // overrides, may in the future.
+ // Retryer option is not also specified.
+ //
+ // When creating a new API client, this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently does not support per operation call overrides, but may in the future.
RetryMode aws.RetryMode
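The retry-related options above are normally set per client through a functional option; a minimal sketch (aws.RetryModeAdaptive is one of the standard aws.RetryMode values):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Override retry behavior for this client only; as noted above, per-operation
	// overrides are not currently supported.
	client := sso.NewFromConfig(cfg, func(o *sso.Options) {
		o.RetryMaxAttempts = 5
		o.RetryMode = aws.RetryModeAdaptive
	})
	_ = client
}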
// Retryer guides how HTTP requests should be retried in case of recoverable
@@ -97,8 +102,9 @@ type Options struct {
// The initial DefaultsMode used when the client options were constructed. If the
// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
- // value was at that point in time. Currently does not support per operation call
- // overrides, may in the future.
+ // value was at that point in time.
+ //
+ // Currently does not support per operation call overrides, but may in the future.
resolvedDefaultsMode aws.DefaultsMode
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
@@ -143,6 +149,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
// this field will likely prevent you from using any endpoint-related service
// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
// To migrate an EndpointResolver implementation that uses a custom endpoint, set
// the client option BaseEndpoint instead.
func WithEndpointResolver(v EndpointResolver) func(*Options) {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
index 8dc02296b1..07ac468e31 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
@@ -25,22 +25,24 @@ type AccountInfo struct {
type RoleCredentials struct {
// The identifier used for the temporary security credentials. For more
- // information, see Using Temporary Security Credentials to Request Access to AWS
- // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
- // in the AWS IAM User Guide.
+ // information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS IAM User Guide.
+ //
+ // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
AccessKeyId *string
// The date on which temporary security credentials expire.
Expiration int64
- // The key that is used to sign the request. For more information, see Using
- // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
- // in the AWS IAM User Guide.
+ // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS
+ // IAM User Guide.
+ //
+ // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
SecretAccessKey *string
- // The token used for temporary credentials. For more information, see Using
- // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
- // in the AWS IAM User Guide.
+ // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS
+ // IAM User Guide.
+ //
+ // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
SessionToken *string
noSmithyDocumentSerde
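A short sketch of turning the returned RoleCredentials into a provider that other SDK clients can consume, assuming the upstream credentials helper package:

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	ssotypes "github.com/aws/aws-sdk-go-v2/service/sso/types"
)

// toStaticProvider wraps the temporary key pair and session token in a static
// credentials provider, valid until the Expiration reported alongside them.
func toStaticProvider(rc *ssotypes.RoleCredentials) aws.CredentialsProvider {
	return credentials.NewStaticCredentialsProvider(
		aws.ToString(rc.AccessKeyId),
		aws.ToString(rc.SecretAccessKey),
		aws.ToString(rc.SessionToken),
	)
}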
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
index 053f180bf6..b70701a528 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
@@ -1,3 +1,23 @@
+# v1.24.3 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.24.2 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2024-05-10)
+
+* **Feature**: Updated request parameters for PKCE support.
+
+# v1.23.5 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
# v1.23.4 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
index 63f1eeb131..393ab84b04 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
@@ -32,34 +32,43 @@ func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optF
type CreateTokenInput struct {
// The unique identifier string for the client or application. This value comes
- // from the result of the RegisterClient API.
+ // from the result of the RegisterClient API.
//
// This member is required.
ClientId *string
// A secret string generated for the client. This value should come from the
- // persisted result of the RegisterClient API.
+ // persisted result of the RegisterClient API.
//
// This member is required.
ClientSecret *string
// Supports the following OAuth grant types: Device Code and Refresh Token.
// Specify either of the following values, depending on the grant type that you
- // want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh
- // Token - refresh_token For information about how to obtain the device code, see
- // the StartDeviceAuthorization topic.
+ // want:
+ //
+ // * Device Code - urn:ietf:params:oauth:grant-type:device_code
+ //
+ // * Refresh Token - refresh_token
+ //
+ // For information about how to obtain the device code, see the StartDeviceAuthorization topic.
//
// This member is required.
GrantType *string
// Used only when calling this API for the Authorization Code grant type. The
// short-term code is used to identify this authorization request. This grant type
- // is currently unsupported for the CreateToken API.
+ // is currently unsupported for the CreateToken API.
Code *string
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ CodeVerifier *string
+
// Used only when calling this API for the Device Code grant type. This short-term
// code is used to identify this authorization request. This comes from the result
- // of the StartDeviceAuthorization API.
+ // of the StartDeviceAuthorization API.
DeviceCode *string
// Used only when calling this API for the Authorization Code grant type. This
@@ -69,16 +78,18 @@ type CreateTokenInput struct {
// Used only when calling this API for the Refresh Token grant type. This token is
// used to refresh short-term tokens, such as the access token, that might expire.
+ //
// For more information about the features and limitations of the current IAM
// Identity Center OIDC implementation, see Considerations for Using this Guide in
- // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html)
- // .
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
RefreshToken *string
// The list of scopes for which authorization is requested. The access token that
// is issued is limited to the scopes that are granted. If this value is not
// specified, IAM Identity Center authorizes all scopes that are configured for the
- // client during the call to RegisterClient .
+ // client during the call to RegisterClient.
Scope []string
noSmithyDocumentSerde
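A minimal sketch of the device-code grant described above: the device code from StartDeviceAuthorization is redeemed with the documented grant-type string (client registration values are placeholders):

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// redeemDeviceCode exchanges a device code for access and refresh tokens.
func redeemDeviceCode(ctx context.Context, client *ssooidc.Client, clientID, clientSecret, deviceCode string) (*ssooidc.CreateTokenOutput, error) {
	return client.CreateToken(ctx, &ssooidc.CreateTokenInput{
		ClientId:     aws.String(clientID),
		ClientSecret: aws.String(clientSecret),
		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
		DeviceCode:   aws.String(deviceCode),
	})
}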
@@ -86,7 +97,8 @@ type CreateTokenInput struct {
type CreateTokenOutput struct {
- // A bearer token to access AWS accounts and applications assigned to a user.
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
AccessToken *string
// Indicates the time in seconds when an access token will expire.
@@ -94,18 +106,22 @@ type CreateTokenOutput struct {
// The idToken is not implemented or supported. For more information about the
// features and limitations of the current IAM Identity Center OIDC implementation,
- // see Considerations for Using this Guide in the IAM Identity Center OIDC API
- // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html)
- // . A JSON Web Token (JWT) that identifies who is associated with the issued
- // access token.
+ // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference].
+ //
+ // A JSON Web Token (JWT) that identifies who is associated with the issued access
+ // token.
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
IdToken *string
// A token that, if present, can be used to refresh a previously issued access
- // token that might have expired. For more information about the features and
- // limitations of the current IAM Identity Center OIDC implementation, see
- // Considerations for Using this Guide in the IAM Identity Center OIDC API
- // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html)
- // .
+ // token that might have expired.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
RefreshToken *string
// Used to notify the client that the returned token is an access token. The
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
index 6340953894..1d54f14d80 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
@@ -12,8 +12,8 @@ import (
// Creates and returns access and refresh tokens for clients and applications that
// are authenticated using IAM entities. The access token can be used to fetch
-// short-term credentials for the assigned AWS accounts or to access application
-// APIs using bearer authentication.
+// short-term credentials for the assigned Amazon Web Services accounts or to
+// access application APIs using bearer authentication.
func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) {
if params == nil {
params = &CreateTokenWithIAMInput{}
@@ -39,10 +39,15 @@ type CreateTokenWithIAMInput struct {
// Supports the following OAuth grant types: Authorization Code, Refresh Token,
// JWT Bearer, and Token Exchange. Specify one of the following values, depending
- // on the grant type that you want: * Authorization Code - authorization_code *
- // Refresh Token - refresh_token * JWT Bearer -
- // urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange -
- // urn:ietf:params:oauth:grant-type:token-exchange
+ // on the grant type that you want:
+ //
+ // * Authorization Code - authorization_code
+ //
+ // * Refresh Token - refresh_token
+ //
+ // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer
+ //
+ // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange
//
// This member is required.
GrantType *string
@@ -59,6 +64,11 @@ type CreateTokenWithIAMInput struct {
// in the Authorization Code GrantOptions for the application.
Code *string
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ CodeVerifier *string
+
// Used only when calling this API for the Authorization Code grant type. This
// value specifies the location of the client or application that has registered to
// receive the authorization code.
@@ -66,16 +76,21 @@ type CreateTokenWithIAMInput struct {
// Used only when calling this API for the Refresh Token grant type. This token is
// used to refresh short-term tokens, such as the access token, that might expire.
+ //
// For more information about the features and limitations of the current IAM
// Identity Center OIDC implementation, see Considerations for Using this Guide in
- // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html)
- // .
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
RefreshToken *string
// Used only when calling this API for the Token Exchange grant type. This value
// specifies the type of token that the requester can receive. The following values
- // are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token *
- // Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+ // are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
RequestedTokenType *string
// The list of scopes for which authorization is requested. The access token that
@@ -94,8 +109,9 @@ type CreateTokenWithIAMInput struct {
// Used only when calling this API for the Token Exchange grant type. This value
// specifies the type of token that is passed as the subject of the exchange. The
- // following value is supported: * Access Token -
- // urn:ietf:params:oauth:token-type:access_token
+ // following value is supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
SubjectTokenType *string
noSmithyDocumentSerde
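The grant-type strings documented above plug straight into this input struct. Below is a minimal sketch of an Authorization Code (PKCE) call; the non-vendored import paths, the ClientId and RedirectUri fields (which exist on this type but are not visible in this hunk), and all literal values are assumptions for illustration, not part of this change.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ssooidc.NewFromConfig(cfg)

	// Authorization Code grant with PKCE: CodeVerifier must match the code
	// challenge that was presented when the authorization code was requested.
	out, err := client.CreateTokenWithIAM(context.TODO(), &ssooidc.CreateTokenWithIAMInput{
		ClientId:     aws.String("example-client-id"),          // illustrative
		GrantType:    aws.String("authorization_code"),
		Code:         aws.String("example-authorization-code"), // illustrative
		CodeVerifier: aws.String("example-code-verifier"),      // illustrative
		RedirectUri:  aws.String("http://127.0.0.1:8080/callback"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("received access token:", out.AccessToken != nil)
}
```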
@@ -103,7 +119,8 @@ type CreateTokenWithIAMInput struct {
type CreateTokenWithIAMOutput struct {
- // A bearer token to access AWS accounts and applications assigned to a user.
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
AccessToken *string
// Indicates the time in seconds when an access token will expire.
@@ -114,17 +131,21 @@ type CreateTokenWithIAMOutput struct {
IdToken *string
// Indicates the type of tokens that are issued by IAM Identity Center. The
- // following values are supported: * Access Token -
- // urn:ietf:params:oauth:token-type:access_token * Refresh Token -
- // urn:ietf:params:oauth:token-type:refresh_token
+ // following values are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
IssuedTokenType *string
// A token that, if present, can be used to refresh a previously issued access
- // token that might have expired. For more information about the features and
- // limitations of the current IAM Identity Center OIDC implementation, see
- // Considerations for Using this Guide in the IAM Identity Center OIDC API
- // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html)
- // .
+ // token that might have expired.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
RefreshToken *string
// The list of scopes for which authorization is granted. The access token that is
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
index 09f016ec1e..9daccf79b8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
@@ -41,6 +41,25 @@ type RegisterClientInput struct {
// This member is required.
ClientType *string
+ // This IAM Identity Center application ARN is used to define
+ // administrator-managed configuration for public client access to resources. At
+ // authorization, the scopes, grants, and redirect URI available to this client
+ // will be restricted by this application resource.
+ EntitledApplicationArn *string
+
+ // The list of OAuth 2.0 grant types that are defined by the client. This list is
+ // used to restrict the token granting flows available to the client.
+ GrantTypes []string
+
+ // The IAM Identity Center Issuer URL associated with an instance of IAM Identity
+ // Center. This value is needed for user access to resources through the client.
+ IssuerUrl *string
+
+ // The list of redirect URIs that are defined by the client. At completion of
+ // authorization, this list is used to restrict what locations the user agent can
+ // be redirected back to.
+ RedirectUris []string
+
// The list of scopes that are defined by the client. Upon authorization, this
// list is used to restrict permissions when granting an access token.
Scopes []string
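A sketch of how a client might populate the four new RegisterClientInput fields introduced above. The ClientName field (not shown in this hunk), the helper signature, and all literal values are assumptions for illustration.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// registerPublicClient registers a public OIDC client, restricting the grant
// types and redirect URIs it may use (per the new fields in this change).
func registerPublicClient(ctx context.Context, client *ssooidc.Client) (*ssooidc.RegisterClientOutput, error) {
	return client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
		ClientName:   aws.String("example-cli"),
		ClientType:   aws.String("public"),
		GrantTypes:   []string{"authorization_code", "refresh_token"},
		RedirectUris: []string{"http://127.0.0.1:8080/callback"},
		IssuerUrl:    aws.String("https://identitycenter.amazonaws.com/ssoins-example"),
		Scopes:       []string{"sso:account:access"},
	})
}
```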
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
index c568805b22..0b727e38b9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
@@ -30,22 +30,23 @@ func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDevi
type StartDeviceAuthorizationInput struct {
// The unique identifier string for the client that is registered with IAM
- // Identity Center. This value should come from the persisted result of the
- // RegisterClient API operation.
+ // Identity Center. This value should come from the persisted result of the RegisterClient API
+ // operation.
//
// This member is required.
ClientId *string
// A secret string that is generated for the client. This value should come from
- // the persisted result of the RegisterClient API operation.
+ // the persisted result of the RegisterClient API operation.
//
// This member is required.
ClientSecret *string
- // The URL for the Amazon Web Services access portal. For more information, see
- // Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html)
+ // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal]
// in the IAM Identity Center User Guide.
//
+ // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html
+ //
// This member is required.
StartUrl *string
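Taken together with RegisterClient, the fields above drive the device-authorization flow. The sketch below shows the shape of that flow; the device_code grant-type URN, the output field names (DeviceCode, VerificationUriComplete, Interval), the AuthorizationPendingException polling check, and the simple sleep loop are assumptions for illustration rather than part of this change.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

// deviceFlow starts device authorization against the given start URL and polls
// CreateToken until the user completes sign-in in their browser.
func deviceFlow(ctx context.Context, client *ssooidc.Client, clientID, clientSecret, startURL string) (*ssooidc.CreateTokenOutput, error) {
	auth, err := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     aws.String(clientID),
		ClientSecret: aws.String(clientSecret),
		StartUrl:     aws.String(startURL),
	})
	if err != nil {
		return nil, err
	}
	fmt.Println("complete sign-in at:", aws.ToString(auth.VerificationUriComplete))

	for {
		tok, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
			ClientId:     aws.String(clientID),
			ClientSecret: aws.String(clientSecret),
			GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
			DeviceCode:   auth.DeviceCode,
		})
		if err == nil {
			return tok, nil
		}
		var pending *types.AuthorizationPendingException
		if !errors.As(err, &pending) {
			return nil, err // any failure other than "user has not finished yet"
		}
		time.Sleep(time.Duration(auth.Interval) * time.Second)
	}
}
```

Production code should also honor the ExpiresIn value returned by StartDeviceAuthorization rather than polling indefinitely.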
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
index 76a1160ece..05e8c6b7e5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
@@ -13,11 +13,21 @@ import (
smithyio "github.com/aws/smithy-go/io"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
+ smithytime "github.com/aws/smithy-go/time"
smithyhttp "github.com/aws/smithy-go/transport/http"
"io"
"strings"
+ "time"
)
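+
+// deserializeS3Expires parses an HTTP-date string; on a parse failure it
+// deliberately returns a nil time and a nil error, so a malformed value is
+// treated as a missing timestamp rather than a deserialization failure.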
+func deserializeS3Expires(v string) (*time.Time, error) {
+ t, err := smithytime.ParseHTTPDate(v)
+ if err != nil {
+ return nil, nil
+ }
+ return &t, nil
+}
+
type awsRestjson1_deserializeOpCreateToken struct {
}
@@ -581,12 +591,18 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response
case strings.EqualFold("InvalidClientMetadataException", errorCode):
return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody)
+ case strings.EqualFold("InvalidRedirectUriException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody)
+
case strings.EqualFold("InvalidRequestException", errorCode):
return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
case strings.EqualFold("InvalidScopeException", errorCode):
return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody)
+ case strings.EqualFold("UnsupportedGrantTypeException", errorCode):
+ return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody)
+
default:
genericError := &smithy.GenericAPIError{
Code: errorCode,
@@ -1158,6 +1174,42 @@ func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Res
return output
}
+func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidRedirectUriException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
output := &types.InvalidRequestException{}
var buff [1024]byte
@@ -1717,6 +1769,55 @@ func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGran
return nil
}
+func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidRedirectUriException
+ if *v == nil {
+ sv = &types.InvalidRedirectUriException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error {
if v == nil {
return fmt.Errorf("unexpected nil of type %T", v)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
index 53cd4f55a0..1d258e5677 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
@@ -6,33 +6,41 @@
// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
// client (such as CLI or a native application) to register with IAM Identity
// Center. The service also enables the client to fetch the user’s access token
-// upon successful authentication and authorization with IAM Identity Center. IAM
-// Identity Center uses the sso and identitystore API namespaces. Considerations
-// for Using This Guide Before you begin using this guide, we recommend that you
-// first review the following important information about how the IAM Identity
-// Center OIDC service works.
+// upon successful authentication and authorization with IAM Identity Center.
+//
+// IAM Identity Center uses the sso and identitystore API namespaces.
+//
+// # Considerations for Using This Guide
+//
+// Before you begin using this guide, we recommend that you first review the
+// following important information about how the IAM Identity Center OIDC service
+// works.
+//
// - The IAM Identity Center OIDC service currently implements only the portions
-// of the OAuth 2.0 Device Authorization Grant standard (
-// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) )
-// that are necessary to enable single sign-on authentication with the CLI.
+// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628]) that are necessary to
+// enable single sign-on authentication with the CLI.
+//
// - With older versions of the CLI, the service only emits OIDC access tokens,
// so to obtain a new token, users must explicitly re-authenticate. To access the
// OIDC flow that supports token refresh and doesn’t require re-authentication,
// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with
// support for OIDC token refresh and configurable IAM Identity Center session
-// durations. For more information, see Configure Amazon Web Services access
-// portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html)
-// .
+// durations. For more information, see [Configure Amazon Web Services access portal session duration].
+//
// - The access tokens provided by this service grant access to all Amazon Web
// Services account entitlements assigned to an IAM Identity Center user, not just
// a particular application.
+//
// - The documentation in this guide does not describe the mechanism to convert
// the access token into Amazon Web Services Auth (“sigv4”) credentials for use
// with IAM-protected Amazon Web Services service endpoints. For more information,
-// see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
-// in the IAM Identity Center Portal API Reference Guide.
+// see [GetRoleCredentials] in the IAM Identity Center Portal API Reference Guide.
+//
+// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity
+// Center User Guide.
//
-// For general information about IAM Identity Center, see What is IAM Identity
-// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
-// in the IAM Identity Center User Guide.
+// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html
+// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html
+// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628
+// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
package ssooidc
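The token-refresh capability the package documentation mentions comes down to a CreateToken call with the refresh_token grant. A minimal sketch follows, assuming the non-vendored import path, the helper name, and the standard grant-type string.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// refreshAccessToken exchanges a previously issued refresh token for a new
// access token.
func refreshAccessToken(ctx context.Context, client *ssooidc.Client, clientID, clientSecret, refreshToken string) (*ssooidc.CreateTokenOutput, error) {
	return client.CreateToken(ctx, &ssooidc.CreateTokenInput{
		ClientId:     aws.String(clientID),
		ClientSecret: aws.String(clientSecret),
		GrantType:    aws.String("refresh_token"),
		RefreshToken: aws.String(refreshToken),
	})
}
```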
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
index cbc7e8415f..80189fbfbc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -3,4 +3,4 @@
package ssooidc
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.23.4"
+const goModuleVersion = "1.24.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
index b964e7e109..69ded47c74 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
@@ -50,8 +50,10 @@ type Options struct {
// Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
// value for this field will likely prevent you from using any endpoint-related
// service features released after the introduction of EndpointResolverV2 and
- // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom
- // endpoint, set the client option BaseEndpoint instead.
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
EndpointResolver EndpointResolver
// Resolves the endpoint used for a particular service operation. This should be
@@ -70,17 +72,20 @@ type Options struct {
// RetryMaxAttempts specifies the maximum number of attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. If specified in an operation call's
- // functional options with a value that is different than the constructed client's
- // Options, the Client's Retryer will be wrapped to use the operation's specific
- // RetryMaxAttempts value.
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
RetryMaxAttempts int
// RetryMode specifies the retry mode the API client will be created with, if
- // Retryer option is not also specified. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. Currently does not support per operation call
- // overrides, may in the future.
+ // Retryer option is not also specified.
+ //
+ // When creating a new API client, this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently does not support per operation call overrides, but may in the future.
RetryMode aws.RetryMode
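For context, the retry knobs documented here (and BaseEndpoint, the recommended replacement for the deprecated EndpointResolver) are usually set through functional options. A sketch with illustrative values, assuming the non-vendored import paths:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
		o.RetryMaxAttempts = 5              // allow up to 5 attempts per operation
		o.RetryMode = aws.RetryModeAdaptive // adaptive rather than standard retries
		// Custom endpoint via BaseEndpoint instead of the deprecated EndpointResolver.
		o.BaseEndpoint = aws.String("https://oidc.us-east-1.amazonaws.com")
	})
	_ = client
}
```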
// Retryer guides how HTTP requests should be retried in case of recoverable
@@ -97,8 +102,9 @@ type Options struct {
// The initial DefaultsMode used when the client options were constructed. If the
// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
- // value was at that point in time. Currently does not support per operation call
- // overrides, may in the future.
+ // value was at that point in time.
+ //
+ // Currently does not support per operation call overrides, but may in the future.
resolvedDefaultsMode aws.DefaultsMode
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
@@ -143,6 +149,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
// this field will likely prevent you from using any endpoint-related service
// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
// To migrate an EndpointResolver implementation that uses a custom endpoint, set
// the client option BaseEndpoint instead.
func WithEndpointResolver(v EndpointResolver) func(*Options) {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
index 754218b78e..04411bd616 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
@@ -95,6 +95,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value
ok.String(*v.Code)
}
+ if v.CodeVerifier != nil {
+ ok := object.Key("codeVerifier")
+ ok.String(*v.CodeVerifier)
+ }
+
if v.DeviceCode != nil {
ok := object.Key("deviceCode")
ok.String(*v.DeviceCode)
@@ -207,6 +212,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithI
ok.String(*v.Code)
}
+ if v.CodeVerifier != nil {
+ ok := object.Key("codeVerifier")
+ ok.String(*v.CodeVerifier)
+ }
+
if v.GrantType != nil {
ok := object.Key("grantType")
ok.String(*v.GrantType)
@@ -324,6 +334,30 @@ func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput,
ok.String(*v.ClientType)
}
+ if v.EntitledApplicationArn != nil {
+ ok := object.Key("entitledApplicationArn")
+ ok.String(*v.EntitledApplicationArn)
+ }
+
+ if v.GrantTypes != nil {
+ ok := object.Key("grantTypes")
+ if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.IssuerUrl != nil {
+ ok := object.Key("issuerUrl")
+ ok.String(*v.IssuerUrl)
+ }
+
+ if v.RedirectUris != nil {
+ ok := object.Key("redirectUris")
+ if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil {
+ return err
+ }
+ }
+
if v.Scopes != nil {
ok := object.Key("scopes")
if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil {
@@ -419,6 +453,28 @@ func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDevic
return nil
}
+func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error {
array := value.Array()
defer array.Close()
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
index 86b62049fd..2cfe7b48fe 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
@@ -188,7 +188,7 @@ func (e *InvalidClientMetadataException) ErrorCode() string {
func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Indicates that a request contains an invalid grant. This can occur if a client
-// makes a CreateToken request with an invalid grant type.
+// makes a CreateToken request with an invalid grant type.
type InvalidGrantException struct {
Message *string
@@ -217,6 +217,36 @@ func (e *InvalidGrantException) ErrorCode() string {
}
func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+// Indicates that one or more redirect URIs in the request are not supported for
+// this operation.
+type InvalidRedirectUriException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidRedirectUriException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRedirectUriException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRedirectUriException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRedirectUriException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
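A sketch of how a caller would surface the new exception type, using the standard errors.As pattern for typed smithy API errors; the helper name and log message are illustrative.

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

// registerOrExplain registers a client and reports a rejected redirect URI
// separately from other failures.
func registerOrExplain(ctx context.Context, client *ssooidc.Client, in *ssooidc.RegisterClientInput) (*ssooidc.RegisterClientOutput, error) {
	out, err := client.RegisterClient(ctx, in)
	if err != nil {
		var badURI *types.InvalidRedirectUriException
		if errors.As(err, &badURI) {
			log.Printf("redirect URI rejected by IAM Identity Center: %s", badURI.ErrorMessage())
		}
		return nil, err
	}
	return out, nil
}
```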
// Indicates that something is wrong with the input to the request. For example, a
// required parameter might be missing or out of range.
type InvalidRequestException struct {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
index 2fd5d5a649..77cd603460 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -1,3 +1,19 @@
+# v1.28.10 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.28.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.7 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
# v1.28.6 (2024-03-29)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
index e0e2c9c2e8..936f917bfd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
@@ -16,69 +16,99 @@ import (
// Amazon Web Services resources. These temporary credentials consist of an access
// key ID, a secret access key, and a security token. Typically, you use AssumeRole
// within your account or for cross-account access. For a comparison of AssumeRole
-// with other API operations that produce temporary credentials, see Requesting
-// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. Permissions The temporary security credentials created by
-// AssumeRole can be used to make API calls to any Amazon Web Services service
-// with the following exception: You cannot call the Amazon Web Services STS
-// GetFederationToken or GetSessionToken API operations. (Optional) You can pass
-// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policy Amazon
-// Resource Names (ARNs) to use as managed session policies. The plaintext that you
-// use for both inline and managed session policies can't exceed 2,048 characters.
-// Passing policies to this operation returns new temporary credentials. The
-// resulting session's permissions are the intersection of the role's
-// identity-based policy and the session policies. You can use the role's temporary
-// credentials in subsequent Amazon Web Services API calls to access resources in
-// the account that owns the role. You cannot use session policies to grant more
-// permissions than those allowed by the identity-based policy of the role that is
-// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. When you create a role, you create two policies: a role
-// trust policy that specifies who can assume the role, and a permissions policy
-// that specifies what can be done with the role. You specify the trusted principal
-// that is allowed to assume the role in the role trust policy. To assume a role
-// from a different account, your Amazon Web Services account must be trusted by
-// the role. The trust relationship is defined in the role's trust policy when the
-// role is created. That trust policy states which accounts are allowed to delegate
-// that access to users in the account. A user who wants to access a role in a
-// different account must also have permissions that are delegated from the account
-// administrator. The administrator must attach a policy that allows the user to
-// call AssumeRole for the ARN of the role in the other account. To allow a user
-// to assume a role in the same account, you can do either of the following:
+// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials] and [Comparing the Amazon Web Services STS API operations] in the
+// IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any Amazon Web Services service with the following exception: You
+// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken
+// API operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies] in the IAM User Guide.
+//
+// When you create a role, you create two policies: a role trust policy that
+// specifies who can assume the role, and a permissions policy that specifies what
+// can be done with the role. You specify the trusted principal that is allowed to
+// assume the role in the role trust policy.
+//
+// To assume a role from a different account, your Amazon Web Services account
+// must be trusted by the role. The trust relationship is defined in the role's
+// trust policy when the role is created. That trust policy states which accounts
+// are allowed to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have
+// permissions that are delegated from the account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN of the
+// role in the other account.
+//
+// To allow a user to assume a role in the same account, you can do either of the
+// following:
+//
// - Attach a policy to the user that allows the user to call AssumeRole (as long
// as the role's trust policy trusts the account).
+//
// - Add the user as a principal directly in the role's trust policy.
//
// You can do either because the role’s trust policy acts as an IAM resource-based
// policy. When a resource-based policy grants access to a principal in the same
// account, no additional identity-based policy is required. For more information
-// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
-// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your
-// session. These tags are called session tags. For more information about session
-// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide. An administrator must grant you the permissions necessary
-// to pass session tags. The administrator can also create granular permissions to
-// allow you to pass only specific session tags. For more information, see
-// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide. You can set the session tags as transitive. Transitive
-// tags persist during role chaining. For more information, see Chaining Roles
-// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include
-// multi-factor authentication (MFA) information when you call AssumeRole . This is
-// useful for cross-account scenarios to ensure that the user that assumes the role
-// has been authenticated with an Amazon Web Services MFA device. In that scenario,
-// the trust policy of the role being assumed includes a condition that tests for
-// MFA authentication. If the caller does not include valid MFA information, the
-// request to assume the role is denied. The condition in a trust policy that tests
-// for MFA authentication might look like the following example. "Condition":
-// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see
-// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
-// in the IAM User Guide guide. To use MFA with AssumeRole , you pass values for
-// the SerialNumber and TokenCode parameters. The SerialNumber value identifies
-// the user's hardware or virtual MFA device. The TokenCode is the time-based
-// one-time password (TOTP) that the MFA device produces.
+// about trust policies and resource-based policies, see [IAM Policies] in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These tags are
+// called session tags. For more information about session tags, see [Passing Session Tags in STS] in the IAM
+// User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control] in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide.
+//
+// # Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information when
+// you call AssumeRole . This is useful for cross-account scenarios to ensure that
+// the user that assumes the role has been authenticated with an Amazon Web
+// Services MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication. If the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication might
+// look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide.
+//
+// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that the
+// MFA device produces.
+//
+// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) {
if params == nil {
params = &AssumeRoleInput{}
@@ -101,17 +131,19 @@ type AssumeRoleInput struct {
// This member is required.
RoleArn *string
- // An identifier for the assumed role session. Use the role session name to
- // uniquely identify a session when the same role is assumed by different
- // principals or for different reasons. In cross-account scenarios, the role
- // session name is visible to, and can be logged by the account that owns the role.
- // The role session name is also used in the ARN of the assumed role principal.
- // This means that subsequent cross-account API requests that use the temporary
- // security credentials will expose the role session name to the external account
- // in their CloudTrail logs. The regex used to validate this parameter is a string
- // of characters consisting of upper- and lower-case alphanumeric characters with
- // no spaces. You can also include underscores or any of the following characters:
- // =,.@-
+ // An identifier for the assumed role session.
+ //
+ // Use the role session name to uniquely identify a session when the same role is
+ // assumed by different principals or for different reasons. In cross-account
+ // scenarios, the role session name is visible to, and can be logged by the account
+ // that owns the role. The role session name is also used in the ARN of the assumed
+ // role principal. This means that subsequent cross-account API requests that use
+ // the temporary security credentials will expose the role session name to the
+ // external account in their CloudTrail logs.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
//
// This member is required.
RoleSessionName *string
@@ -122,23 +154,27 @@ type AssumeRoleInput struct {
// hours. If you specify a value higher than this setting or the administrator
// setting (whichever is lower), the operation fails. For example, if you specify a
// session duration of 12 hours, but your administrator set the maximum session
- // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web
- // Services CLI or Amazon Web Services API role session to a maximum of one hour.
- // When you use the AssumeRole API operation to assume a role, you can specify the
- // duration of your role session with the DurationSeconds parameter. You can
- // specify a parameter value of up to 43200 seconds (12 hours), depending on the
- // maximum session duration setting for your role. However, if you assume a role
- // using role chaining and provide a DurationSeconds parameter value greater than
- // one hour, the operation fails. To learn how to view the maximum value for your
- // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
- // in the IAM User Guide. By default, the value is set to 3600 seconds. The
- // DurationSeconds parameter is separate from the duration of a console session
- // that you might request using the returned credentials. The request to the
- // federation endpoint for a console sign-in token takes a SessionDuration
+ // duration to 6 hours, your operation fails.
+ //
+ // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API
+ // role session to a maximum of one hour. When you use the AssumeRole API
+ // operation to assume a role, you can specify the duration of your role session
+ // with the DurationSeconds parameter. You can specify a parameter value of up to
+ // 43200 seconds (12 hours), depending on the maximum session duration setting for
+ // your role. However, if you assume a role using role chaining and provide a
+ // DurationSeconds parameter value greater than one hour, the operation fails. To
+ // learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role] in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request to
+ // the federation endpoint for a console sign-in token takes a SessionDuration
// parameter that specifies the maximum length of the console session. For more
- // information, see Creating a URL that Enables Federated Users to Access the
- // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
- // in the IAM User Guide.
+ // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console] in the IAM User Guide.
+ //
+ // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+ // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
DurationSeconds *int32
// A unique identifier that might be required when you assume a role in another
@@ -149,63 +185,79 @@ type AssumeRoleInput struct {
// the administrator of the trusting account might send an external ID to the
// administrator of the trusted account. That way, only someone with the ID can
// assume the role, rather than everyone in the account. For more information about
- // the external ID, see How to Use an External ID When Granting Access to Your
- // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@:/-
+ // the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party] in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@:/-
+ //
+ // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html
ExternalId *string
// An IAM policy in JSON format that you want to use as an inline session policy.
+ //
// This parameter is optional. Passing policies to this operation returns new
// temporary credentials. The resulting session's permissions are the intersection
// of the role's identity-based policy and the session policies. You can use the
// role's temporary credentials in subsequent Amazon Web Services API calls to
// access resources in the account that owns the role. You cannot use session
// policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The plaintext that you use for both inline and managed
- // session policies can't exceed 2,048 characters. The JSON policy characters can
- // be any ASCII character from the space character to the end of the valid
- // character list (\u0020 through \u00FF). It can also include the tab (\u0009),
- // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web
- // Services conversion compresses the passed inline session policy, managed policy
- // ARNs, and session tags into a packed binary format that has a separate limit.
- // Your request can fail for this limit even if your plaintext meets the other
- // requirements. The PackedPolicySize response element indicates by percentage how
- // close the policies and tags for your request are to the upper size limit.
+ // policy of the role that is being assumed. For more information, see [Session Policies] in the IAM
+ // User Guide.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
Policy *string
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
// use as managed session policies. The policies must exist in the same account as
- // the role. This parameter is optional. You can provide up to 10 managed policy
- // ARNs. However, the plaintext that you use for both inline and managed session
- // policies can't exceed 2,048 characters. For more information about ARNs, see
- // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference. An Amazon Web Services conversion
- // compresses the passed inline session policy, managed policy ARNs, and session
- // tags into a packed binary format that has a separate limit. Your request can
- // fail for this limit even if your plaintext meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit. Passing policies to this
- // operation returns new temporary credentials. The resulting session's permissions
- // are the intersection of the role's identity-based policy and the session
- // policies. You can use the role's temporary credentials in subsequent Amazon Web
- // Services API calls to access resources in the account that owns the role. You
- // cannot use session policies to grant more permissions than those allowed by the
- // identity-based policy of the role that is being assumed. For more information,
- // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
+ // the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plaintext that you use for both inline and managed session policies
+ // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces] in the
+ // Amazon Web Services General Reference.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent Amazon Web Services API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see [Session Policies] in the IAM User Guide.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
PolicyArns []types.PolicyDescriptorType
// A list of previously acquired trusted context assertions in the format of a
// JSON array. The trusted context assertion is signed and encrypted by Amazon Web
- // Services STS. The following is an example of a ProvidedContext value that
- // includes a single trusted context assertion and the ARN of the context provider
- // from which the trusted context assertion was generated.
- // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}]
+ // Services STS.
+ //
+ // The following is an example of a ProvidedContext value that includes a single
+ // trusted context assertion and the ARN of the context provider from which the
+ // trusted context assertion was generated.
+ //
+ // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}]
ProvidedContexts []types.ProvidedContext
// The identification number of the MFA device that is associated with the user
@@ -213,79 +265,97 @@ type AssumeRoleInput struct {
// the role being assumed includes a condition that requires MFA authentication.
// The value is either the serial number for a hardware device (such as
// GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as
- // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter
- // is a string of characters consisting of upper- and lower-case alphanumeric
- // characters with no spaces. You can also include underscores or any of the
- // following characters: =,.@-
+ // arn:aws:iam::123456789012:mfa/user ).
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
SerialNumber *string
// The source identity specified by the principal that is calling the AssumeRole
- // operation. You can require users to specify a source identity when they assume a
- // role. You do this by using the sts:SourceIdentity condition key in a role trust
- // policy. You can use source identity information in CloudTrail logs to determine
- // who took actions with a role. You can use the aws:SourceIdentity condition key
- // to further control access to Amazon Web Services resources based on the value of
- // source identity. For more information about using source identity, see Monitor
- // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@-. You cannot use a value that begins with the text aws: . This prefix is
- // reserved for Amazon Web Services internal use.
+ // operation.
+ //
+ // You can require users to specify a source identity when they assume a role. You
+ // do this by using the sts:SourceIdentity condition key in a role trust policy.
+ // You can use source identity information in CloudTrail logs to determine who took
+ // actions with a role. You can use the aws:SourceIdentity condition key to
+ // further control access to Amazon Web Services resources based on the value of
+ // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles] in the
+ // IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-. You cannot use a
+ // value that begins with the text aws: . This prefix is reserved for Amazon Web
+ // Services internal use.
+ //
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
SourceIdentity *string
// A list of session tags that you want to pass. Each session tag consists of a
- // key name and an associated value. For more information about session tags, see
- // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
- // in the IAM User Guide. This parameter is optional. You can pass up to 50 session
- // tags. The plaintext session tag keys can’t exceed 128 characters, and the values
- // can’t exceed 256 characters. For these and additional limits, see IAM and STS
- // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide. An Amazon Web Services conversion compresses the passed
- // inline session policy, managed policy ARNs, and session tags into a packed
- // binary format that has a separate limit. Your request can fail for this limit
- // even if your plaintext meets the other requirements. The PackedPolicySize
- // response element indicates by percentage how close the policies and tags for
- // your request are to the upper size limit. You can pass a session tag with the
- // same key as a tag that is already attached to the role. When you do, session
- // tags override a role tag with the same key. Tag key–value pairs are not case
- // sensitive, but case is preserved. This means that you cannot have separate
- // Department and department tag keys. Assume that the role has the Department =
- // Marketing tag and you pass the department = engineering session tag. Department
- // and department are not saved as separate tags, and the session tag passed in
- // the request takes precedence over the role tag. Additionally, if you used
- // temporary credentials to perform this operation, the new session inherits any
- // transitive session tags from the calling session. If you pass a session tag with
- // the same key as an inherited tag, the operation fails. To view the inherited
- // tags for a session, see the CloudTrail logs. For more information, see Viewing
- // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs)
+ // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions]
// in the IAM User Guide.
+ //
+ // This parameter is optional. You can pass up to 50 session tags. The plaintext
+ // session tag keys can’t exceed 128 characters, and the values can’t exceed 256
+ // characters. For these and additional limits, see [IAM and STS Character Limits] in the IAM User Guide.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // You can pass a session tag with the same key as a tag that is already attached
+ // to the role. When you do, session tags override a role tag with the same key.
+ //
+ // Tag key–value pairs are not case sensitive, but case is preserved. This means
+ // that you cannot have separate Department and department tag keys. Assume that
+ // the role has the Department = Marketing tag and you pass the department =
+ // engineering session tag. Department and department are not saved as separate
+ // tags, and the session tag passed in the request takes precedence over the role
+ // tag.
+ //
+ // Additionally, if you used temporary credentials to perform this operation, the
+ // new session inherits any transitive session tags from the calling session. If
+ // you pass a session tag with the same key as an inherited tag, the operation
+ // fails. To view the inherited tags for a session, see the CloudTrail logs. For
+	// more information, see [Viewing Session Tags in CloudTrail] in the IAM User Guide.
+ //
+ // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+ // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs
Tags []types.Tag
// The value provided by the MFA device, if the trust policy of the role being
// assumed requires MFA. (In other words, if the policy includes a condition that
// tests for MFA). If the role being assumed requires MFA and if the TokenCode
// value is missing or expired, the AssumeRole call returns an "access denied"
- // error. The format for this parameter, as described by its regex pattern, is a
- // sequence of six numeric digits.
+ // error.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
TokenCode *string
// A list of keys for session tags that you want to set as transitive. If you set
// a tag key as transitive, the corresponding key and value passes to subsequent
- // sessions in a role chain. For more information, see Chaining Roles with Session
- // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
- // in the IAM User Guide. This parameter is optional. When you set session tags as
- // transitive, the session policy and session tags packed binary limit is not
- // affected. If you choose not to specify a transitive tag key, then no tags are
- // passed from this session to any subsequent sessions.
+	// sessions in a role chain. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide.
+ //
+ // This parameter is optional. When you set session tags as transitive, the
+ // session policy and session tags packed binary limit is not affected.
+ //
+ // If you choose not to specify a transitive tag key, then no tags are passed from
+ // this session to any subsequent sessions.
+ //
+ // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
TransitiveTagKeys []string
noSmithyDocumentSerde
}
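As a rough illustration of how the AssumeRoleInput fields documented above fit together, the sketch below builds a request with session tags, a transitive tag key, and a source identity, then calls AssumeRole with the v2 STS client. This is a hedged example, not part of the vendored change: the role ARN, session name, and tag values are placeholder assumptions.

```go
// Hedged sketch: role ARN, session name, and tag values are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("example-session"),
		SourceIdentity:  aws.String("alice"), // recorded in CloudTrail; cannot start with "aws:"
		// Up to 50 session tags; keys <= 128 characters, values <= 256 characters.
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
		// A transitive tag key persists through role chaining.
		TransitiveTagKeys: []string{"Department"},
		// TokenCode: aws.String("123456"), // six-digit MFA code, if the trust policy requires MFA
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("assumed role:", aws.ToString(out.AssumedRoleUser.Arn))
}
```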
-// Contains the response to a successful AssumeRole request, including temporary
-// Amazon Web Services credentials that can be used to make Amazon Web Services
-// requests.
+// Contains the response to a successful AssumeRole request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type AssumeRoleOutput struct {
// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
@@ -296,9 +366,10 @@ type AssumeRoleOutput struct {
AssumedRoleUser *types.AssumedRoleUser
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token. The size of the security token
- // that STS API operations return is not fixed. We strongly recommend that you make
- // no assumptions about the maximum size.
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// A percentage value that indicates the packed size of the session policies and
@@ -308,17 +379,21 @@ type AssumeRoleOutput struct {
PackedPolicySize *int32
// The source identity specified by the principal that is calling the AssumeRole
- // operation. You can require users to specify a source identity when they assume a
- // role. You do this by using the sts:SourceIdentity condition key in a role trust
- // policy. You can use source identity information in CloudTrail logs to determine
- // who took actions with a role. You can use the aws:SourceIdentity condition key
- // to further control access to Amazon Web Services resources based on the value of
- // source identity. For more information about using source identity, see Monitor
- // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@-
+ // operation.
+ //
+ // You can require users to specify a source identity when they assume a role. You
+ // do this by using the sts:SourceIdentity condition key in a role trust policy.
+ // You can use source identity information in CloudTrail logs to determine who took
+ // actions with a role. You can use the aws:SourceIdentity condition key to
+ // further control access to Amazon Web Services resources based on the value of
+	// source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles] in the
+ // IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
SourceIdentity *string
// Metadata pertaining to the operation's result.
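As a sketch of how the AssumeRoleOutput documented above is typically consumed, the snippet below copies the returned temporary credentials into a fresh aws.Config for subsequent clients. The package and function names are assumptions; in practice the SDK's stscreds credentials provider usually manages this refresh instead.

```go
// Hedged sketch: package and function names are illustrative only.
package stsexample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// configFromAssumeRole returns a copy of base whose clients sign with the
// temporary credentials from an AssumeRole response.
func configFromAssumeRole(base aws.Config, out *sts.AssumeRoleOutput) aws.Config {
	creds := out.Credentials
	// The security token's size is not fixed; treat it as an opaque string.
	base.Credentials = credentials.NewStaticCredentialsProvider(
		aws.ToString(creds.AccessKeyId),
		aws.ToString(creds.SecretAccessKey),
		aws.ToString(creds.SessionToken),
	)
	return base
}
```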
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
index 2a57b72ac9..f88ab4a22b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
@@ -16,92 +16,132 @@ import (
// mechanism for tying an enterprise identity store or directory to role-based
// Amazon Web Services access without user-specific credentials or configuration.
// For a comparison of AssumeRoleWithSAML with the other API operations that
-// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. The temporary security credentials returned by this
-// operation consist of an access key ID, a secret access key, and a security
-// token. Applications can use these temporary security credentials to sign calls
-// to Amazon Web Services services. Session Duration By default, the temporary
-// security credentials created by AssumeRoleWithSAML last for one hour. However,
-// you can use the optional DurationSeconds parameter to specify the duration of
-// your session. Your role session lasts for the duration that you specify, or
-// until the time specified in the SAML authentication response's
-// SessionNotOnOrAfter value, whichever is shorter. You can provide a
-// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
-// duration setting for the role. This setting can have a value from 1 hour to 12
-// hours. To learn how to view the maximum value for your role, see View the
-// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
-// in the IAM User Guide. The maximum session duration limit applies when you use
-// the AssumeRole* API operations or the assume-role* CLI commands. However the
-// limit does not apply when you use those operations to create a console URL. For
-// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
-// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining)
-// limits your CLI or Amazon Web Services API role session to a maximum of one
+// produce temporary credentials, see [Requesting Temporary Security Credentials] and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of an
+// access key ID, a secret access key, and a security token. Applications can use
+// these temporary security credentials to sign calls to Amazon Web Services
+// services.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML
+// authentication response's SessionNotOnOrAfter value, whichever is shorter. You
+// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the
+// maximum session duration setting for the role. This setting can have a value
+// from 1 hour to 12 hours. To learn how to view the maximum value for your role,
+// see [View the Maximum Session Duration Setting for a Role] in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console URL.
+// For more information, see [Using IAM Roles] in the IAM User Guide.
+//
+// [Role chaining] limits your CLI or Amazon Web Services API role session to a maximum of one
// hour. When you use the AssumeRole API operation to assume a role, you can
// specify the duration of your role session with the DurationSeconds parameter.
// You can specify a parameter value of up to 43200 seconds (12 hours), depending
// on the maximum session duration setting for your role. However, if you assume a
// role using role chaining and provide a DurationSeconds parameter value greater
-// than one hour, the operation fails. Permissions The temporary security
-// credentials created by AssumeRoleWithSAML can be used to make API calls to any
-// Amazon Web Services service with the following exception: you cannot call the
-// STS GetFederationToken or GetSessionToken API operations. (Optional) You can
-// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policy Amazon
-// Resource Names (ARNs) to use as managed session policies. The plaintext that you
-// use for both inline and managed session policies can't exceed 2,048 characters.
-// Passing policies to this operation returns new temporary credentials. The
-// resulting session's permissions are the intersection of the role's
-// identity-based policy and the session policies. You can use the role's temporary
-// credentials in subsequent Amazon Web Services API calls to access resources in
-// the account that owns the role. You cannot use session policies to grant more
-// permissions than those allowed by the identity-based policy of the role that is
-// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of
-// Amazon Web Services security credentials. The identity of the caller is
-// validated by using keys in the metadata document that is uploaded for the SAML
-// provider entity for your identity provider. Calling AssumeRoleWithSAML can
-// result in an entry in your CloudTrail logs. The entry includes the value in the
-// NameID element of the SAML assertion. We recommend that you use a NameIDType
-// that is not associated with any personally identifiable information (PII). For
-// example, you could instead use the persistent identifier (
-// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can
-// configure your IdP to pass attributes into your SAML assertion as session tags.
-// Each session tag consists of a key name and an associated value. For more
-// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session
-// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters.
-// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
-// in the IAM User Guide. An Amazon Web Services conversion compresses the passed
-// inline session policy, managed policy ARNs, and session tags into a packed
-// binary format that has a separate limit. Your request can fail for this limit
-// even if your plaintext meets the other requirements. The PackedPolicySize
-// response element indicates by percentage how close the policies and tags for
-// your request are to the upper size limit. You can pass a session tag with the
-// same key as a tag that is attached to the role. When you do, session tags
-// override the role's tags with the same key. An administrator must grant you the
-// permissions necessary to pass session tags. The administrator can also create
-// granular permissions to allow you to pass only specific session tags. For more
-// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide. You can set the session tags as transitive. Transitive
-// tags persist during role chaining. For more information, see Chaining Roles
-// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide. SAML Configuration Before your application can call
-// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to
-// issue the claims required by Amazon Web Services. Additionally, you must use
-// Identity and Access Management (IAM) to create a SAML provider entity in your
-// Amazon Web Services account that represents your identity provider. You must
-// also create an IAM role that specifies this SAML provider in its trust policy.
+// than one hour, the operation fails.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used to
+// make API calls to any Amazon Web Services service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies] in the IAM User Guide.
+//
+// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services
+// security credentials. The identity of the caller is validated by using keys in
+// the metadata document that is uploaded for the SAML provider entity for your
+// identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The
+// entry includes the value in the NameID element of the SAML assertion. We
+// recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the
+// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ).
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your SAML
+// assertion as session tags. Each session tag consists of a key name and an
+// associated value. For more information about session tags, see [Passing Session Tags in STS] in the IAM User
+// Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. For these and
+// additional limits, see [IAM and STS Character Limits] in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
+// separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, session tags override the role's tags with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control] in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide.
+//
+// # SAML Configuration
+//
+// Before your application can call AssumeRoleWithSAML , you must configure your
+// SAML identity provider (IdP) to issue the claims required by Amazon Web
+// Services. Additionally, you must use Identity and Access Management (IAM) to
+// create a SAML provider entity in your Amazon Web Services account that
+// represents your identity provider. You must also create an IAM role that
+// specifies this SAML provider in its trust policy.
+//
// For more information, see the following resources:
-// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
-// in the IAM User Guide.
-// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
-// in the IAM User Guide.
-// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
-// in the IAM User Guide.
-// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
-// in the IAM User Guide.
+//
+//   - [About SAML 2.0-based Federation] in the IAM User Guide.
+//
+//   - [Creating SAML Identity Providers] in the IAM User Guide.
+//
+//   - [Configuring a Relying Party and Claims] in the IAM User Guide.
+//
+//   - [Creating a Role for SAML 2.0 Federation] in the IAM User Guide.
+//
+// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html
+// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) {
if params == nil {
params = &AssumeRoleWithSAMLInput{}
@@ -130,9 +170,11 @@ type AssumeRoleWithSAMLInput struct {
// This member is required.
RoleArn *string
- // The base64 encoded SAML authentication response provided by the IdP. For more
- // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
- // in the IAM User Guide.
+ // The base64 encoded SAML authentication response provided by the IdP.
+ //
+ // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide.
+ //
+ // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html
//
// This member is required.
SAMLAssertion *string
@@ -146,92 +188,114 @@ type AssumeRoleWithSAMLInput struct {
// than this setting, the operation fails. For example, if you specify a session
// duration of 12 hours, but your administrator set the maximum session duration to
// 6 hours, your operation fails. To learn how to view the maximum value for your
- // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
- // in the IAM User Guide. By default, the value is set to 3600 seconds. The
- // DurationSeconds parameter is separate from the duration of a console session
- // that you might request using the returned credentials. The request to the
- // federation endpoint for a console sign-in token takes a SessionDuration
+	// role, see [View the Maximum Session Duration Setting for a Role] in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request to
+ // the federation endpoint for a console sign-in token takes a SessionDuration
// parameter that specifies the maximum length of the console session. For more
- // information, see Creating a URL that Enables Federated Users to Access the
- // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
- // in the IAM User Guide.
+	// information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console] in the IAM User Guide.
+ //
+ // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+ // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
DurationSeconds *int32
// An IAM policy in JSON format that you want to use as an inline session policy.
+ //
// This parameter is optional. Passing policies to this operation returns new
// temporary credentials. The resulting session's permissions are the intersection
// of the role's identity-based policy and the session policies. You can use the
// role's temporary credentials in subsequent Amazon Web Services API calls to
// access resources in the account that owns the role. You cannot use session
// policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The plaintext that you use for both inline and managed
- // session policies can't exceed 2,048 characters. The JSON policy characters can
- // be any ASCII character from the space character to the end of the valid
- // character list (\u0020 through \u00FF). It can also include the tab (\u0009),
- // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web
- // Services conversion compresses the passed inline session policy, managed policy
- // ARNs, and session tags into a packed binary format that has a separate limit.
- // Your request can fail for this limit even if your plaintext meets the other
- // requirements. The PackedPolicySize response element indicates by percentage how
- // close the policies and tags for your request are to the upper size limit.
+	// policy of the role that is being assumed. For more information, see [Session Policies] in the IAM
+ // User Guide.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
Policy *string
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
// use as managed session policies. The policies must exist in the same account as
- // the role. This parameter is optional. You can provide up to 10 managed policy
- // ARNs. However, the plaintext that you use for both inline and managed session
- // policies can't exceed 2,048 characters. For more information about ARNs, see
- // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference. An Amazon Web Services conversion
- // compresses the passed inline session policy, managed policy ARNs, and session
- // tags into a packed binary format that has a separate limit. Your request can
- // fail for this limit even if your plaintext meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit. Passing policies to this
- // operation returns new temporary credentials. The resulting session's permissions
- // are the intersection of the role's identity-based policy and the session
- // policies. You can use the role's temporary credentials in subsequent Amazon Web
- // Services API calls to access resources in the account that owns the role. You
- // cannot use session policies to grant more permissions than those allowed by the
- // identity-based policy of the role that is being assumed. For more information,
- // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
+ // the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plaintext that you use for both inline and managed session policies
+	// can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces] in the
+ // Amazon Web Services General Reference.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent Amazon Web Services API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+	// being assumed. For more information, see [Session Policies] in the IAM User Guide.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
PolicyArns []types.PolicyDescriptorType
noSmithyDocumentSerde
}
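A hedged sketch of wiring together the AssumeRoleWithSAMLInput fields documented above: the SAML assertion, a shortened session duration, an inline session policy, and a managed policy ARN. Every ARN, the base64 SAML response, and the policy document below are placeholder assumptions, not values taken from this change.

```go
// Hedged sketch: all ARNs, the SAML response, and the policy are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),      // placeholder
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/MyIdP"), // placeholder
		SAMLAssertion: aws.String("<base64-encoded SAML response>"),                // placeholder
		// The session lasts DurationSeconds or until SessionNotOnOrAfter, whichever is shorter.
		DurationSeconds: aws.Int32(3600),
		// Inline session policy; inline plus managed plaintext must stay under 2,048 characters.
		Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
		// Up to 10 managed policy ARNs may be passed as session policies.
		PolicyArns: []types.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("SAML subject:", aws.ToString(out.Subject))
}
```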
-// Contains the response to a successful AssumeRoleWithSAML request, including
-// temporary Amazon Web Services credentials that can be used to make Amazon Web
-// Services requests.
+// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type AssumeRoleWithSAMLOutput struct {
// The identifiers for the temporary security credentials that the operation
// returns.
AssumedRoleUser *types.AssumedRoleUser
- // The value of the Recipient attribute of the SubjectConfirmationData element of
+ // The value of the Recipient attribute of the SubjectConfirmationData element of
// the SAML assertion.
Audience *string
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token. The size of the security token
- // that STS API operations return is not fixed. We strongly recommend that you make
- // no assumptions about the maximum size.
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// The value of the Issuer element of the SAML assertion.
Issuer *string
// A hash value based on the concatenation of the following:
+ //
// - The Issuer response value.
+ //
// - The Amazon Web Services account ID.
+ //
// - The friendly name (the last part of the ARN) of the SAML provider in IAM.
+ //
// The combination of NameQualifier and Subject can be used to uniquely identify a
- // user. The following pseudocode shows how the hash value is calculated: BASE64 (
- // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )
+ // user.
+ //
+ // The following pseudocode shows how the hash value is calculated:
+ //
+ // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )
NameQualifier *string
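The NameQualifier pseudocode above maps directly to Go. The sketch below uses the example issuer, account ID, and provider name from the doc comment; they are illustrative values only.

```go
// Hedged sketch of the NameQualifier hash from the pseudocode above.
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )
	sum := sha1.Sum([]byte("https://example.com/saml" + "123456789012" + "/MySAMLIdP"))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
```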
// A percentage value that indicates the packed size of the session policies and
@@ -240,31 +304,36 @@ type AssumeRoleWithSAMLOutput struct {
// allowed space.
PackedPolicySize *int32
- // The value in the SourceIdentity attribute in the SAML assertion. You can
- // require users to set a source identity value when they assume a role. You do
- // this by using the sts:SourceIdentity condition key in a role trust policy. That
- // way, actions that are taken with the role are associated with that user. After
- // the source identity is set, the value cannot be changed. It is present in the
- // request for all actions that are taken by the role and persists across chained
- // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
- // sessions. You can configure your SAML identity provider to use an attribute
- // associated with your users, like user name or email, as the source identity when
- // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML
- // assertion. For more information about using source identity, see Monitor and
- // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@-
+ // The value in the SourceIdentity attribute in the SAML assertion.
+ //
+ // You can require users to set a source identity value when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. That way, actions that are taken with the role are associated with that
+ // user. After the source identity is set, the value cannot be changed. It is
+ // present in the request for all actions that are taken by the role and persists
+	// across [chained role] sessions. You can configure your SAML identity provider to use an
+	// attribute associated with your users, like user name or email, as the source
+	// identity when calling AssumeRoleWithSAML . You do this by adding an attribute to
+	// the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles] in
+ // the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
SourceIdentity *string
// The value of the NameID element in the Subject element of the SAML assertion.
Subject *string
- // The format of the name ID, as defined by the Format attribute in the NameID
+ // The format of the name ID, as defined by the Format attribute in the NameID
// element of the SAML assertion. Typical examples of the format are transient or
- // persistent . If the format includes the prefix
- // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example,
+ // persistent .
+ //
+ // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format ,
+ // that prefix is removed. For example,
// urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient .
// If the format includes any other prefix, the format is returned with no
// modifications.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
index 98108ce6af..6c8cf43e53 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
@@ -14,105 +14,143 @@ import (
// Returns a set of temporary security credentials for users who have been
// authenticated in a mobile or web application with a web identity provider.
// Example providers include the OAuth 2.0 providers Login with Amazon and
-// Facebook, or any OpenID Connect-compatible identity provider such as Google or
-// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html)
-// . For mobile applications, we recommend that you use Amazon Cognito. You can use
-// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
-// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/)
-// to uniquely identify a user. You can also supply the user with a consistent
-// identity throughout the lifetime of an application. To learn more about Amazon
-// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html)
-// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not
-// require the use of Amazon Web Services security credentials. Therefore, you can
-// distribute an application (for example, on mobile devices) that requests
-// temporary security credentials without including long-term Amazon Web Services
-// credentials in the application. You also don't need to deploy server-based proxy
-// services that use long-term Amazon Web Services credentials. Instead, the
-// identity of the caller is validated by using a token from the web identity
-// provider. For a comparison of AssumeRoleWithWebIdentity with the other API
-// operations that produce temporary credentials, see Requesting Temporary
-// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. The temporary security credentials returned by this API
-// consist of an access key ID, a secret access key, and a security token.
-// Applications can use these temporary security credentials to sign calls to
-// Amazon Web Services service API operations. Session Duration By default, the
-// temporary security credentials created by AssumeRoleWithWebIdentity last for
-// one hour. However, you can use the optional DurationSeconds parameter to
-// specify the duration of your session. You can provide a value from 900 seconds
-// (15 minutes) up to the maximum session duration setting for the role. This
-// setting can have a value from 1 hour to 12 hours. To learn how to view the
-// maximum value for your role, see View the Maximum Session Duration Setting for
-// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
-// in the IAM User Guide. The maximum session duration limit applies when you use
-// the AssumeRole* API operations or the assume-role* CLI commands. However the
-// limit does not apply when you use those operations to create a console URL. For
-// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
-// in the IAM User Guide. Permissions The temporary security credentials created by
-// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web
-// Services service with the following exception: you cannot call the STS
-// GetFederationToken or GetSessionToken API operations. (Optional) You can pass
-// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policy Amazon
-// Resource Names (ARNs) to use as managed session policies. The plaintext that you
-// use for both inline and managed session policies can't exceed 2,048 characters.
-// Passing policies to this operation returns new temporary credentials. The
-// resulting session's permissions are the intersection of the role's
-// identity-based policy and the session policies. You can use the role's temporary
-// credentials in subsequent Amazon Web Services API calls to access resources in
-// the account that owns the role. You cannot use session policies to grant more
-// permissions than those allowed by the identity-based policy of the role that is
-// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass
-// attributes into your web identity token as session tags. Each session tag
-// consists of a key name and an associated value. For more information about
-// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session
-// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters.
-// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
-// in the IAM User Guide. An Amazon Web Services conversion compresses the passed
-// inline session policy, managed policy ARNs, and session tags into a packed
-// binary format that has a separate limit. Your request can fail for this limit
-// even if your plaintext meets the other requirements. The PackedPolicySize
-// response element indicates by percentage how close the policies and tags for
-// your request are to the upper size limit. You can pass a session tag with the
-// same key as a tag that is attached to the role. When you do, the session tag
-// overrides the role tag with the same key. An administrator must grant you the
-// permissions necessary to pass session tags. The administrator can also create
-// granular permissions to allow you to pass only specific session tags. For more
-// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide. You can set the session tags as transitive. Transitive
-// tags persist during role chaining. For more information, see Chaining Roles
-// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide. Identities Before your application can call
-// AssumeRoleWithWebIdentity , you must have an identity token from a supported
-// identity provider and create a role that the application can assume. The role
-// that your application assumes must trust the identity provider that is
-// associated with the identity token. In other words, the identity provider must
-// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can
-// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
-// of the provided web identity token. We recommend that you avoid using any
-// personally identifiable information (PII) in this field. For example, you could
-// instead use a GUID or a pairwise identifier, as suggested in the OIDC
-// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes)
-// . For more information about how to use web identity federation and the
+// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities].
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can use
+// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide] and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also
+// supply the user with a consistent identity throughout the lifetime of an
+// application.
+//
+// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
+// Services security credentials. Therefore, you can distribute an application (for
+// example, on mobile devices) that requests temporary security credentials without
+// including long-term Amazon Web Services credentials in the application. You also
+// don't need to deploy server-based proxy services that use long-term Amazon Web
+// Services credentials. Instead, the identity of the caller is validated by using
+// a token from the web identity provider. For a comparison of
+// AssumeRoleWithWebIdentity with the other API operations that produce temporary
+// credentials, see [Requesting Temporary Security Credentials] and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to Amazon Web Services service API
+// operations.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by
+// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional
+// DurationSeconds parameter to specify the duration of your session. You can
+// provide a value from 900 seconds (15 minutes) up to the maximum session duration
+// setting for the role. This setting can have a value from 1 hour to 12 hours. To
+// learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role] in the IAM User Guide.
+// The maximum session duration limit applies when you use the AssumeRole* API
+// operations or the assume-role* CLI commands. However, the limit does not apply
+// when you use those operations to create a console URL. For more information, see
+// [Using IAM Roles] in the IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can be
+// used to make API calls to any Amazon Web Services service with the following
+// exception: you cannot call the STS GetFederationToken or GetSessionToken API
+// operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies] in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your web identity
+// token as session tags. Each session tag consists of a key name and an associated
+// value. For more information about session tags, see [Passing Session Tags in STS] in the IAM User Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. For these and
+// additional limits, see [IAM and STS Character Limits] in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
+// separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, the session tag overrides the role tag with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control] in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide.
+//
+// # Identities
+//
+// Before your application can call AssumeRoleWithWebIdentity , you must have an
+// identity token from a supported identity provider and create a role that the
+// application can assume. The role that your application assumes must trust the
+// identity provider that is associated with the identity token. In other words,
+// the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail
+// logs. The entry includes the [Subject] of the provided web identity token. We recommend
+// that you avoid using any personally identifiable information (PII) in this
+// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification].
+//
+// For more information about how to use web identity federation and the
// AssumeRoleWithWebIdentity API, see the following resources:
-// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
-// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
-// .
-// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/)
-// . Walk through the process of authenticating through Login with Amazon,
+//
+//   - [Using Web Identity Federation API Operations for Mobile Apps] and [Federation Through a Web-based Identity Provider].
+//
+//   - [Web Identity Federation Playground]. Walk through the process of authenticating through Login with Amazon,
// Facebook, or Google, getting temporary security credentials, and then using
// those credentials to make a request to Amazon Web Services.
-// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
-// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/)
-// . These toolkits contain sample apps that show how to invoke the identity
-// providers. The toolkits then show how to use the information from these
+//
+//   - [Amazon Web Services SDK for iOS Developer Guide] and [Amazon Web Services SDK for Android Developer Guide]. These toolkits
+//     contain sample apps that show how to invoke the identity providers. The toolkits then show how to use the information from these
// providers to get and use temporary security credentials.
-// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications)
-// . This article discusses web identity federation and shows an example of how to
-// use web identity federation to get access to content in Amazon S3.
+//
+//   - [Web Identity Federation with Mobile Applications]. This article discusses web identity federation and shows an example of
+//     how to use web identity federation to get access to content in Amazon S3.
+//
+// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/
+// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+// [Web Identity Federation Playground]: https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/
+// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+// [Web Identity Federation with Mobile Applications]: http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications
+// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html
+// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) {
if params == nil {
params = &AssumeRoleWithWebIdentityInput{}
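For readers reviewing this vendor bump, the following is a minimal, illustrative Go sketch of calling AssumeRoleWithWebIdentity through the upstream module path github.com/aws/aws-sdk-go-v2. The role ARN, session name, and token file path are hypothetical placeholders, not values used anywhere in this repository; the operation itself is an unsigned call, so no long-term credentials are required.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// Load region and other defaults from the environment; no IAM user or
	// role credentials are needed for AssumeRoleWithWebIdentity itself.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// Hypothetical OIDC token location and role ARN, for illustration only.
	token, err := os.ReadFile("/var/run/secrets/oidc/token")
	if err != nil {
		log.Fatal(err)
	}
	out, err := client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-web-identity-role"),
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String(string(token)),
		DurationSeconds:  aws.Int32(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key ID:", aws.ToString(out.Credentials.AccessKeyId))
}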
@@ -139,10 +177,11 @@ type AssumeRoleWithWebIdentityInput struct {
// identifier that is associated with the user who is using your application. That
// way, the temporary security credentials that your application will use are
// associated with that user. This session name is included as part of the ARN and
- // assumed role ID in the AssumedRoleUser response element. The regex used to
- // validate this parameter is a string of characters consisting of upper- and
- // lower-case alphanumeric characters with no spaces. You can also include
- // underscores or any of the following characters: =,.@-
+ // assumed role ID in the AssumedRoleUser response element.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
//
// This member is required.
RoleSessionName *string
@@ -162,73 +201,90 @@ type AssumeRoleWithWebIdentityInput struct {
// higher than this setting, the operation fails. For example, if you specify a
// session duration of 12 hours, but your administrator set the maximum session
// duration to 6 hours, your operation fails. To learn how to view the maximum
- // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
- // in the IAM User Guide. By default, the value is set to 3600 seconds. The
- // DurationSeconds parameter is separate from the duration of a console session
- // that you might request using the returned credentials. The request to the
- // federation endpoint for a console sign-in token takes a SessionDuration
+ // value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request to
+ // the federation endpoint for a console sign-in token takes a SessionDuration
// parameter that specifies the maximum length of the console session. For more
- // information, see Creating a URL that Enables Federated Users to Access the
- // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
- // in the IAM User Guide.
+ // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide.
+ //
+ // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+ // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
DurationSeconds *int32
// An IAM policy in JSON format that you want to use as an inline session policy.
+ //
// This parameter is optional. Passing policies to this operation returns new
// temporary credentials. The resulting session's permissions are the intersection
// of the role's identity-based policy and the session policies. You can use the
// role's temporary credentials in subsequent Amazon Web Services API calls to
// access resources in the account that owns the role. You cannot use session
// policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The plaintext that you use for both inline and managed
- // session policies can't exceed 2,048 characters. The JSON policy characters can
- // be any ASCII character from the space character to the end of the valid
- // character list (\u0020 through \u00FF). It can also include the tab (\u0009),
- // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web
- // Services conversion compresses the passed inline session policy, managed policy
- // ARNs, and session tags into a packed binary format that has a separate limit.
- // Your request can fail for this limit even if your plaintext meets the other
- // requirements. The PackedPolicySize response element indicates by percentage how
- // close the policies and tags for your request are to the upper size limit.
+ // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM
+ // User Guide.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
Policy *string
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
// use as managed session policies. The policies must exist in the same account as
- // the role. This parameter is optional. You can provide up to 10 managed policy
- // ARNs. However, the plaintext that you use for both inline and managed session
- // policies can't exceed 2,048 characters. For more information about ARNs, see
- // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference. An Amazon Web Services conversion
- // compresses the passed inline session policy, managed policy ARNs, and session
- // tags into a packed binary format that has a separate limit. Your request can
- // fail for this limit even if your plaintext meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit. Passing policies to this
- // operation returns new temporary credentials. The resulting session's permissions
- // are the intersection of the role's identity-based policy and the session
- // policies. You can use the role's temporary credentials in subsequent Amazon Web
- // Services API calls to access resources in the account that owns the role. You
- // cannot use session policies to grant more permissions than those allowed by the
- // identity-based policy of the role that is being assumed. For more information,
- // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
+ // the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plaintext that you use for both inline and managed session policies
+ // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the
+ // Amazon Web Services General Reference.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent Amazon Web Services API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see [Session Policies]in the IAM User Guide.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
PolicyArns []types.PolicyDescriptorType
// The fully qualified host component of the domain name of the OAuth 2.0 identity
// provider. Do not specify this value for an OpenID Connect identity provider.
+ //
// Currently www.amazon.com and graph.facebook.com are the only supported identity
// providers for OAuth 2.0 access tokens. Do not include URL schemes and port
- // numbers. Do not specify this value for OpenID Connect ID tokens.
+ // numbers.
+ //
+ // Do not specify this value for OpenID Connect ID tokens.
ProviderId *string
noSmithyDocumentSerde
}
-// Contains the response to a successful AssumeRoleWithWebIdentity request,
-// including temporary Amazon Web Services credentials that can be used to make
-// Amazon Web Services requests.
+// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type AssumeRoleWithWebIdentityOutput struct {
// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
@@ -244,9 +300,10 @@ type AssumeRoleWithWebIdentityOutput struct {
Audience *string
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security token. The size of the security token that STS API
- // operations return is not fixed. We strongly recommend that you make no
- // assumptions about the maximum size.
+ // access key, and a security token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// A percentage value that indicates the packed size of the session policies and
@@ -255,30 +312,34 @@ type AssumeRoleWithWebIdentityOutput struct {
// allowed space.
PackedPolicySize *int32
- // The issuing authority of the web identity token presented. For OpenID Connect
+ // The issuing authority of the web identity token presented. For OpenID Connect
// ID tokens, this contains the value of the iss field. For OAuth 2.0 access
// tokens, this contains the value of the ProviderId parameter that was passed in
// the AssumeRoleWithWebIdentity request.
Provider *string
// The value of the source identity that is returned in the JSON web token (JWT)
- // from the identity provider. You can require users to set a source identity value
- // when they assume a role. You do this by using the sts:SourceIdentity condition
- // key in a role trust policy. That way, actions that are taken with the role are
- // associated with that user. After the source identity is set, the value cannot be
- // changed. It is present in the request for all actions that are taken by the role
- // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
- // sessions. You can configure your identity provider to use an attribute
+ // from the identity provider.
+ //
+ // You can require users to set a source identity value when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. That way, actions that are taken with the role are associated with that
+ // user. After the source identity is set, the value cannot be changed. It is
+ // present in the request for all actions that are taken by the role and persists
+ // across [chained role]sessions. You can configure your identity provider to use an attribute
// associated with your users, like user name or email, as the source identity when
// calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON
- // web token. To learn more about OIDC tokens and claims, see Using Tokens with
- // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html)
- // in the Amazon Cognito Developer Guide. For more information about using source
- // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@-
+ // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon
+ // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles]
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
SourceIdentity *string
// The unique user identifier that is returned by the identity provider. This
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
index b4ad54ab2f..186a8cb583 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
@@ -11,28 +11,39 @@ import (
)
// Decodes additional information about the authorization status of a request from
-// an encoded message returned in response to an Amazon Web Services request. For
-// example, if a user is not authorized to perform an operation that he or she has
-// requested, the request returns a Client.UnauthorizedOperation response (an HTTP
-// 403 response). Some Amazon Web Services operations additionally return an
-// encoded message that can provide details about this authorization failure. Only
-// certain Amazon Web Services operations return an encoded authorization message.
-// The documentation for an individual operation indicates whether that operation
-// returns an encoded message in addition to returning an HTTP code. The message is
-// encoded because the details of the authorization status can contain privileged
-// information that the user who requested the operation should not see. To decode
-// an authorization status message, a user must be granted permissions through an
-// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
-// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage )
-// action. The decoded message includes the following type of information:
+// an encoded message returned in response to an Amazon Web Services request.
+//
+// For example, if a user is not authorized to perform an operation that he or she
+// has requested, the request returns a Client.UnauthorizedOperation response (an
+// HTTP 403 response). Some Amazon Web Services operations additionally return an
+// encoded message that can provide details about this authorization failure.
+//
+// Only certain Amazon Web Services operations return an encoded authorization
+// message. The documentation for an individual operation indicates whether that
+// operation returns an encoded message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// contain privileged information that the user who requested the operation should
+// not see. To decode an authorization status message, a user must be granted
+// permissions through an IAM [policy]to request the DecodeAuthorizationMessage (
+// sts:DecodeAuthorizationMessage ) action.
+//
+// The decoded message includes the following type of information:
+//
// - Whether the request was denied due to an explicit deny or due to the
-// absence of an explicit allow. For more information, see Determining Whether a
-// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
-// in the IAM User Guide.
+// absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User
+// Guide.
+//
// - The principal who made the request.
+//
// - The requested action.
+//
// - The requested resource.
+//
// - The values of condition keys in the context of the user's request.
+//
+// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow
+// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) {
if params == nil {
params = &DecodeAuthorizationMessageInput{}
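As a point of reference for the DecodeAuthorizationMessage docs rewritten above, here is a small hypothetical helper (not part of the SDK or this repository) that decodes the encoded message carried by an authorization failure. It assumes the caller has been granted sts:DecodeAuthorizationMessage.

package stsexample

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// decodeAuthMessage turns the EncodedMessage returned with a
// Client.UnauthorizedOperation error into its human-readable form.
func decodeAuthMessage(ctx context.Context, client *sts.Client, encoded string) (string, error) {
	out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		return "", fmt.Errorf("decode authorization message: %w", err)
	}
	return aws.ToString(out.DecodedMessage), nil
}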
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
index 1f7cbcc2bb..b6eb6401af 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
@@ -10,23 +10,31 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Returns the account identifier for the specified access key ID. Access keys
-// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and
-// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ).
-// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
-// in the IAM User Guide. When you pass an access key ID to this operation, it
-// returns the ID of the Amazon Web Services account to which the keys belong.
-// Access key IDs beginning with AKIA are long-term credentials for an IAM user or
-// the Amazon Web Services account root user. Access key IDs beginning with ASIA
-// are temporary credentials that are created using STS operations. If the account
-// in the response belongs to you, you can sign in as the root user and review your
-// root user access keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
-// to learn which IAM user owns the keys. To learn who requested the temporary
-// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
-// in the IAM User Guide. This operation does not indicate the state of the access
-// key. The key might be active, inactive, or deleted. Active keys might not have
-// permissions to perform an operation. Providing a deleted access key might return
-// an error that the key doesn't exist.
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example,
+// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example,
+// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access
+// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// Amazon Web Services account to which the keys belong. Access key IDs beginning
+// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
+// account root user. Access key IDs beginning with ASIA are temporary credentials
+// that are created using STS operations. If the account in the response belongs to
+// you, you can sign in as the root user and review your root user access keys.
+// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who
+// requested the temporary credentials for an ASIA access key, view the STS events
+// in your [CloudTrail logs]in the IAM User Guide.
+//
+// This operation does not indicate the state of the access key. The key might be
+// active, inactive, or deleted. Active keys might not have permissions to perform
+// an operation. Providing a deleted access key might return an error that the key
+// doesn't exist.
+//
+// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html
+// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html
+// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html
func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) {
if params == nil {
params = &GetAccessKeyInfoInput{}
@@ -44,9 +52,10 @@ func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoI
type GetAccessKeyInfoInput struct {
- // The identifier of an access key. This parameter allows (through its regex
- // pattern) a string of characters that can consist of any upper- or lowercase
- // letter or digit.
+ // The identifier of an access key.
+ //
+ // This parameter allows (through its regex pattern) a string of characters that
+ // can consist of any upper- or lowercase letter or digit.
//
// This member is required.
AccessKeyId *string
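A short illustrative sketch of the GetAccessKeyInfo operation documented above; the helper name is invented for this example and the access key ID is expected to be supplied by the caller.

package stsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// accountForAccessKey returns the ID of the account that owns the given
// access key ID (AKIA... long-term or ASIA... temporary).
func accountForAccessKey(ctx context.Context, client *sts.Client, accessKeyID string) (string, error) {
	out, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String(accessKeyID),
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Account), nil
}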
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
index acb7ede44f..ed4c82832a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
@@ -12,13 +12,15 @@ import (
)
// Returns details about the IAM user or role whose credentials are used to call
-// the operation. No permissions are required to perform this operation. If an
-// administrator attaches a policy to your identity that explicitly denies access
-// to the sts:GetCallerIdentity action, you can still perform this operation.
-// Permissions are not required because the same information is returned when
-// access is denied. To view an example response, see I Am Not Authorized to
-// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
-// in the IAM User Guide.
+// the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// attaches a policy to your identity that explicitly denies access to the
+// sts:GetCallerIdentity action, you can still perform this operation. Permissions
+// are not required because the same information is returned when access is denied.
+// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide.
+//
+// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa
func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) {
if params == nil {
params = &GetCallerIdentityInput{}
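GetCallerIdentity takes no meaningful input and needs no permissions, which makes it a common connectivity and credential sanity check. A minimal sketch, with a hypothetical helper name:

package stsexample

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// whoAmI prints the account, ARN, and unique user ID behind the credentials
// the client was configured with.
func whoAmI(ctx context.Context, client *sts.Client) error {
	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		return err
	}
	fmt.Printf("account=%s arn=%s userid=%s\n",
		aws.ToString(out.Account), aws.ToString(out.Arn), aws.ToString(out.UserId))
	return nil
}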
@@ -38,8 +40,8 @@ type GetCallerIdentityInput struct {
noSmithyDocumentSerde
}
-// Contains the response to a successful GetCallerIdentity request, including
-// information about the entity making the request.
+// Contains the response to a successful GetCallerIdentity request, including information about the
+// entity making the request.
type GetCallerIdentityOutput struct {
// The Amazon Web Services account ID number of the account that owns or contains
@@ -51,8 +53,10 @@ type GetCallerIdentityOutput struct {
// The unique identifier of the calling entity. The exact value depends on the
// type of entity that is making the call. The values returned are those listed in
- // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
- // found on the Policy Variables reference page in the IAM User Guide.
+ // the aws:userid column in the [Principal table]found on the Policy Variables reference page in
+ // the IAM User Guide.
+ //
+ // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable
UserId *string
// Metadata pertaining to the operation's result.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
index 3679618cb5..37bde0cce6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
@@ -14,74 +14,100 @@ import (
// Returns a set of temporary security credentials (consisting of an access key
// ID, a secret access key, and a security token) for a user. A typical use is in a
// proxy application that gets temporary security credentials on behalf of
-// distributed applications inside a corporate network. You must call the
-// GetFederationToken operation using the long-term security credentials of an IAM
-// user. As a result, this call is appropriate in contexts where those credentials
-// can be safeguarded, usually in a server-based application. For a comparison of
-// GetFederationToken with the other API operations that produce temporary
-// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. Although it is possible to call GetFederationToken using
-// the security credentials of an Amazon Web Services account root user rather than
-// an IAM user that you create for the purpose of a proxy application, we do not
-// recommend it. For more information, see Safeguard your root user credentials
-// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
-// in the IAM User Guide. You can create a mobile-based or browser-based app that
-// can authenticate users using a web identity provider like Login with Amazon,
-// Facebook, Google, or an OpenID Connect-compatible identity provider. In this
-// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/)
-// or AssumeRoleWithWebIdentity . For more information, see Federation Through a
-// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
-// in the IAM User Guide. Session duration The temporary credentials are valid for
-// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600
-// seconds (36 hours). The default session duration is 43,200 seconds (12 hours).
-// Temporary credentials obtained by using the root user credentials have a maximum
-// duration of 3,600 seconds (1 hour). Permissions You can use the temporary
-// credentials created by GetFederationToken in any Amazon Web Services service
-// with the following exceptions:
+// distributed applications inside a corporate network.
+//
+// You must call the GetFederationToken operation using the long-term security
+// credentials of an IAM user. As a result, this call is appropriate in contexts
+// where those credentials can be safeguarded, usually in a server-based
+// application. For a comparison of GetFederationToken with the other API
+// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide.
+//
+// Although it is possible to call GetFederationToken using the security
+// credentials of an Amazon Web Services account root user rather than an IAM user
+// that you create for the purpose of a proxy application, we do not recommend it.
+// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate users
+// using a web identity provider like Login with Amazon, Facebook, Google, or an
+// OpenID Connect-compatible identity provider. In this case, we recommend that you
+// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User
+// Guide.
+//
+// # Session duration
+//
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
+// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by
+// using the root user credentials have a maximum duration of 3,600 seconds (1
+// hour).
+//
+// # Permissions
+//
+// You can use the temporary credentials created by GetFederationToken in any
+// Amazon Web Services service with the following exceptions:
+//
// - You cannot call any IAM operations using the CLI or the Amazon Web Services
// API. This limitation does not apply to console sessions.
+//
// - You cannot call any STS operations except GetCallerIdentity .
//
-// You can use temporary credentials for single sign-on (SSO) to the console. You
-// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policy Amazon
-// Resource Names (ARNs) to use as managed session policies. The plaintext that you
-// use for both inline and managed session policies can't exceed 2,048 characters.
+// You can use temporary credentials for single sign-on (SSO) to the console.
+//
+// You must pass an inline or managed [session policy] to this operation. You can pass a single
+// JSON policy document to use as an inline session policy. You can also specify up
+// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+// policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters.
+//
// Though the session policy parameters are optional, if you do not pass a policy,
// then the resulting federated user session has no permissions. When you pass
// session policies, the session permissions are the intersection of the IAM user
// policies and the session policies that you pass. This gives you a way to further
// restrict the permissions for a federated user. You cannot use session policies
// to grant more permissions than those that are defined in the permissions policy
-// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. For information about using GetFederationToken to create
-// temporary security credentials, see GetFederationToken—Federation Through a
-// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken)
-// . You can use the credentials to access a resource that has a resource-based
+// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For
+// information about using GetFederationToken to create temporary security
+// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker].
+//
+// You can use the credentials to access a resource that has a resource-based
// policy. If that policy specifically references the federated user session in the
// Principal element of the policy, the session has the permissions allowed by the
// policy. These permissions are granted in addition to the permissions granted by
-// the session policies. Tags (Optional) You can pass tag key-value pairs to your
-// session. These are called session tags. For more information about session tags,
-// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide. You can create a mobile-based or browser-based app that
-// can authenticate users using a web identity provider like Login with Amazon,
-// Facebook, Google, or an OpenID Connect-compatible identity provider. In this
-// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/)
-// or AssumeRoleWithWebIdentity . For more information, see Federation Through a
-// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
-// in the IAM User Guide. An administrator must grant you the permissions necessary
-// to pass session tags. The administrator can also create granular permissions to
-// allow you to pass only specific session tags. For more information, see
-// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is
-// preserved. This means that you cannot have separate Department and department
-// tag keys. Assume that the user that you are federating has the Department =
-// Marketing tag and you pass the department = engineering session tag. Department
-// and department are not saved as separate tags, and the session tag passed in
-// the request takes precedence over the user tag.
+// the session policies.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These are called
+// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User
+// Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate users
+// using a web identity provider like Login with Amazon, Facebook, Google, or an
+// OpenID Connect-compatible identity provider. In this case, we recommend that you
+// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User
+// Guide.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This means
+// that you cannot have separate Department and department tag keys. Assume that
+// the user that you are federating has the Department = Marketing tag and you
+// pass the department = engineering session tag. Department and department are
+// not saved as separate tags, and the session tag passed in the request takes
+// precedence over the user tag.
+//
+// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity
+// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Amazon Cognito]: http://aws.amazon.com/cognito/
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) {
if params == nil {
params = &GetFederationTokenInput{}
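To make the GetFederationToken description above concrete, here is a hedged sketch of a proxy-style broker vending scoped-down credentials for a named federated user. The inline session policy, tag values, and helper name are placeholders chosen for illustration, not recommendations.

package stsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// federatedS3ReadSession returns temporary credentials whose permissions are
// the intersection of the calling IAM user's policy and the inline session
// policy below.
func federatedS3ReadSession(ctx context.Context, client *sts.Client, userName string) (*types.Credentials, error) {
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:GetObject"],"Resource":"*"}]}`
	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
		Name:            aws.String(userName),
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int32(3600),
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}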
@@ -102,10 +128,11 @@ type GetFederationTokenInput struct {
// The name of the federated user. The name is used as an identifier for the
// temporary security credentials (such as Bob ). For example, you can reference
// the federated user name in a resource-based policy, such as in an Amazon S3
- // bucket policy. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@-
+ // bucket policy.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
//
// This member is required.
Name *string
@@ -119,99 +146,127 @@ type GetFederationTokenInput struct {
DurationSeconds *int32
// An IAM policy in JSON format that you want to use as an inline session policy.
- // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // to this operation. You can pass a single JSON policy document to use as an
- // inline session policy. You can also specify up to 10 managed policy Amazon
- // Resource Names (ARNs) to use as managed session policies. This parameter is
- // optional. However, if you do not pass any session policies, then the resulting
- // federated user session has no permissions. When you pass session policies, the
- // session permissions are the intersection of the IAM user policies and the
- // session policies that you pass. This gives you a way to further restrict the
- // permissions for a federated user. You cannot use session policies to grant more
- // permissions than those that are defined in the permissions policy of the IAM
- // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The resulting credentials can be used to access a
- // resource that has a resource-based policy. If that policy specifically
- // references the federated user session in the Principal element of the policy,
- // the session has the permissions allowed by the policy. These permissions are
- // granted in addition to the permissions that are granted by the session policies.
+ //
+ // You must pass an inline or managed [session policy] to this operation. You can pass a single
+ // JSON policy document to use as an inline session policy. You can also specify up
+ // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+ // policies.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions.
+ //
+ // When you pass session policies, the session permissions are the intersection of
+ // the IAM user policies and the session policies that you pass. This gives you a
+ // way to further restrict the permissions for a federated user. You cannot use
+ // session policies to grant more permissions than those that are defined in the
+ // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User
+ // Guide.
+ //
+ // The resulting credentials can be used to access a resource that has a
+ // resource-based policy. If that policy specifically references the federated user
+ // session in the Principal element of the policy, the session has the permissions
+ // allowed by the policy. These permissions are granted in addition to the
+ // permissions that are granted by the session policies.
+ //
// The plaintext that you use for both inline and managed session policies can't
// exceed 2,048 characters. The JSON policy characters can be any ASCII character
// from the space character to the end of the valid character list (\u0020 through
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
- // return (\u000D) characters. An Amazon Web Services conversion compresses the
- // passed inline session policy, managed policy ARNs, and session tags into a
- // packed binary format that has a separate limit. Your request can fail for this
- // limit even if your plaintext meets the other requirements. The PackedPolicySize
- // response element indicates by percentage how close the policies and tags for
- // your request are to the upper size limit.
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
Policy *string
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
// use as a managed session policy. The policies must exist in the same account as
- // the IAM user that is requesting federated access. You must pass an inline or
- // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // to this operation. You can pass a single JSON policy document to use as an
- // inline session policy. You can also specify up to 10 managed policy Amazon
- // Resource Names (ARNs) to use as managed session policies. The plaintext that you
- // use for both inline and managed session policies can't exceed 2,048 characters.
- // You can provide up to 10 managed policy ARNs. For more information about ARNs,
- // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference. This parameter is optional.
- // However, if you do not pass any session policies, then the resulting federated
- // user session has no permissions. When you pass session policies, the session
- // permissions are the intersection of the IAM user policies and the session
- // policies that you pass. This gives you a way to further restrict the permissions
- // for a federated user. You cannot use session policies to grant more permissions
- // than those that are defined in the permissions policy of the IAM user. For more
- // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The resulting credentials can be used to access a
- // resource that has a resource-based policy. If that policy specifically
- // references the federated user session in the Principal element of the policy,
- // the session has the permissions allowed by the policy. These permissions are
- // granted in addition to the permissions that are granted by the session policies.
+ // the IAM user that is requesting federated access.
+ //
+ // You must pass an inline or managed [session policy] to this operation. You can pass a single
+ // JSON policy document to use as an inline session policy. You can also specify up
+ // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+ // policies. The plaintext that you use for both inline and managed session
+ // policies can't exceed 2,048 characters. You can provide up to 10 managed policy
+ // ARNs. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General
+ // Reference.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions.
+ //
+ // When you pass session policies, the session permissions are the intersection of
+ // the IAM user policies and the session policies that you pass. This gives you a
+ // way to further restrict the permissions for a federated user. You cannot use
+ // session policies to grant more permissions than those that are defined in the
+ // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User
+ // Guide.
+ //
+ // The resulting credentials can be used to access a resource that has a
+ // resource-based policy. If that policy specifically references the federated user
+ // session in the Principal element of the policy, the session has the permissions
+ // allowed by the policy. These permissions are granted in addition to the
+ // permissions that are granted by the session policies.
+ //
// An Amazon Web Services conversion compresses the passed inline session policy,
// managed policy ARNs, and session tags into a packed binary format that has a
// separate limit. Your request can fail for this limit even if your plaintext
// meets the other requirements. The PackedPolicySize response element indicates
// by percentage how close the policies and tags for your request are to the upper
// size limit.
+ //
+ // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
PolicyArns []types.PolicyDescriptorType
// A list of session tags. Each session tag consists of a key name and an
- // associated value. For more information about session tags, see Passing Session
- // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
- // in the IAM User Guide. This parameter is optional. You can pass up to 50 session
- // tags. The plaintext session tag keys can’t exceed 128 characters and the values
- // can’t exceed 256 characters. For these and additional limits, see IAM and STS
- // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide. An Amazon Web Services conversion compresses the passed
- // inline session policy, managed policy ARNs, and session tags into a packed
- // binary format that has a separate limit. Your request can fail for this limit
- // even if your plaintext meets the other requirements. The PackedPolicySize
- // response element indicates by percentage how close the policies and tags for
- // your request are to the upper size limit. You can pass a session tag with the
- // same key as a tag that is already attached to the user you are federating. When
- // you do, session tags override a user tag with the same key. Tag key–value pairs
- // are not case sensitive, but case is preserved. This means that you cannot have
- // separate Department and department tag keys. Assume that the role has the
- // Department = Marketing tag and you pass the department = engineering session
- // tag. Department and department are not saved as separate tags, and the session
- // tag passed in the request takes precedence over the role tag.
+ // associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User
+ // Guide.
+ //
+ // This parameter is optional. You can pass up to 50 session tags. The plaintext
+ // session tag keys can’t exceed 128 characters and the values can’t exceed 256
+ // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // You can pass a session tag with the same key as a tag that is already attached
+ // to the user you are federating. When you do, session tags override a user tag
+ // with the same key.
+ //
+ // Tag key–value pairs are not case sensitive, but case is preserved. This means
+ // that you cannot have separate Department and department tag keys. Assume that
+ // the role has the Department = Marketing tag and you pass the department =
+ // engineering session tag. Department and department are not saved as separate
+ // tags, and the session tag passed in the request takes precedence over the role
+ // tag.
+ //
+ // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
Tags []types.Tag
noSmithyDocumentSerde
}
-// Contains the response to a successful GetFederationToken request, including
-// temporary Amazon Web Services credentials that can be used to make Amazon Web
-// Services requests.
+// Contains the response to a successful GetFederationToken request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type GetFederationTokenOutput struct {
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token. The size of the security token
- // that STS API operations return is not fixed. We strongly recommend that you make
- // no assumptions about the maximum size.
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// Identifiers for the federated user associated with the credentials (such as
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
index 751fb147d4..097ccd8448 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
@@ -15,43 +15,58 @@ import (
// IAM user. The credentials consist of an access key ID, a secret access key, and
// a security token. Typically, you use GetSessionToken if you want to use MFA to
// protect programmatic calls to specific Amazon Web Services API operations like
-// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and
-// submit an MFA code that is associated with their MFA device. Using the temporary
-// security credentials that the call returns, IAM users can then make programmatic
-// calls to API operations that require MFA authentication. An incorrect MFA code
-// causes the API to return an access denied error. For a comparison of
-// GetSessionToken with the other API operations that produce temporary
-// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. No permissions are required for users to perform this
-// operation. The purpose of the sts:GetSessionToken operation is to authenticate
-// the user using MFA. You cannot use policies to control authentication
-// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
-// in the IAM User Guide. Session Duration The GetSessionToken operation must be
-// called by using the long-term Amazon Web Services security credentials of an IAM
-// user. Credentials that are created by IAM users are valid for the duration that
-// you specify. This duration can range from 900 seconds (15 minutes) up to a
-// maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds (12
-// hours). Credentials based on account credentials can range from 900 seconds (15
-// minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions The
-// temporary security credentials created by GetSessionToken can be used to make
-// API calls to any Amazon Web Services service with the following exceptions:
+// Amazon EC2 StopInstances .
+//
+// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is
+// associated with their MFA device. Using the temporary security credentials that
+// the call returns, IAM users can then make programmatic calls to API operations
+// that require MFA authentication. An incorrect MFA code causes the API to return
+// an access denied error. For a comparison of GetSessionToken with the other API
+// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide.
+//
+// No permissions are required for users to perform this operation. The purpose of
+// the sts:GetSessionToken operation is to authenticate the user using MFA. You
+// cannot use policies to control authentication operations. For more information,
+// see [Permissions for GetSessionToken]in the IAM User Guide.
+//
+// # Session Duration
+//
+// The GetSessionToken operation must be called by using the long-term Amazon Web
+// Services security credentials of an IAM user. Credentials that are created by
+// IAM users are valid for the duration that you specify. This duration can range
+// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours),
+// with a default of 43,200 seconds (12 hours). Credentials based on account
+// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1
+// hour), with a default of 1 hour.
+//
+// # Permissions
+//
+// The temporary security credentials created by GetSessionToken can be used to
+// make API calls to any Amazon Web Services service with the following exceptions:
+//
// - You cannot call any IAM API operations unless MFA authentication
// information is included in the request.
+//
// - You cannot call any STS API except AssumeRole or GetCallerIdentity .
//
// The credentials that GetSessionToken returns are based on permissions
// associated with the IAM user whose credentials were used to call the operation.
-// The temporary credentials have the same permissions as the IAM user. Although it
-// is possible to call GetSessionToken using the security credentials of an Amazon
-// Web Services account root user rather than an IAM user, we do not recommend it.
-// If GetSessionToken is called using root user credentials, the temporary
-// credentials have root user permissions. For more information, see Safeguard
-// your root user credentials and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
-// in the IAM User Guide For more information about using GetSessionToken to
-// create temporary credentials, see Temporary Credentials for Users in Untrusted
-// Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
-// in the IAM User Guide.
+// The temporary credentials have the same permissions as the IAM user.
+//
+// Although it is possible to call GetSessionToken using the security credentials
+// of an Amazon Web Services account root user rather than an IAM user, we do not
+// recommend it. If GetSessionToken is called using root user credentials, the
+// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks] in
+// the IAM User Guide.
+//
+// For more information about using GetSessionToken to create temporary
+// credentials, see [Temporary Credentials for Users in Untrusted Environments] in the IAM User Guide.
+//
+// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html
+// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison
+// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken
+// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) {
if params == nil {
params = &GetSessionTokenInput{}
@@ -83,10 +98,11 @@ type GetSessionTokenInput struct {
// number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name
// (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You
// can find the device for an IAM user by going to the Amazon Web Services
- // Management Console and viewing the user's security credentials. The regex used
- // to validate this parameter is a string of characters consisting of upper- and
- // lower-case alphanumeric characters with no spaces. You can also include
- // underscores or any of the following characters: =,.@:/-
+ // Management Console and viewing the user's security credentials.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@:/-
SerialNumber *string
// The value provided by the MFA device, if MFA is required. If any policy
@@ -94,22 +110,24 @@ type GetSessionTokenInput struct {
// authentication is required, the user must provide a code when requesting a set
// of temporary security credentials. A user who fails to provide the code receives
// an "access denied" response when requesting resources that require MFA
- // authentication. The format for this parameter, as described by its regex
- // pattern, is a sequence of six numeric digits.
+ // authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
TokenCode *string
noSmithyDocumentSerde
}
-// Contains the response to a successful GetSessionToken request, including
-// temporary Amazon Web Services credentials that can be used to make Amazon Web
-// Services requests.
+// Contains the response to a successful GetSessionToken request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type GetSessionTokenOutput struct {
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token. The size of the security token
- // that STS API operations return is not fixed. We strongly recommend that you make
- // no assumptions about the maximum size.
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// Metadata pertaining to the operation's result.
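
For reference, a minimal sketch of calling GetSessionToken with an MFA device through the
v2 STS client documented above. The account ID, MFA serial number, and token code are
placeholders; the call must be made with the IAM user's long-term credentials.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/sts"
    )

    func main() {
        ctx := context.Background()
        // Loads the IAM user's long-term credentials from the default chain.
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := sts.NewFromConfig(cfg)

        out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
            DurationSeconds: aws.Int32(3600),                                   // 900 to 129,600 seconds for IAM users
            SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),  // placeholder virtual MFA device
            TokenCode:       aws.String("123456"),                              // six-digit code from the device
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(*out.Credentials.AccessKeyId, "expires", out.Credentials.Expiration)
    }
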
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
index 5d634ce35c..7e4346ec9f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
@@ -20,8 +20,17 @@ import (
"io"
"strconv"
"strings"
+ "time"
)
+func deserializeS3Expires(v string) (*time.Time, error) {
+ t, err := smithytime.ParseHTTPDate(v)
+ if err != nil {
+ return nil, nil
+ }
+ return &t, nil
+}
+
type awsAwsquery_deserializeOpAssumeRole struct {
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
index d963fd8d19..cbb19c7f66 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
@@ -3,9 +3,11 @@
// Package sts provides the API client, operations, and parameter types for AWS
// Security Token Service.
//
-// Security Token Service Security Token Service (STS) enables you to request
-// temporary, limited-privilege credentials for users. This guide provides
-// descriptions of the STS API. For more information about using this service, see
-// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html)
-// .
+// # Security Token Service
+//
+// Security Token Service (STS) enables you to request temporary,
+// limited-privilege credentials for users. This guide provides descriptions of the
+// STS API. For more information about using this service, see [Temporary Security Credentials].
+//
+// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html
package sts
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
index 6e0f31d271..f8c5b4e916 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.28.6"
+const goModuleVersion = "1.28.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
index 5c1be79f8c..bb291161aa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
@@ -50,8 +50,10 @@ type Options struct {
// Deprecated: EndpointResolver and WithEndpointResolver. Providing a
// value for this field will likely prevent you from using any endpoint-related
// service features released after the introduction of EndpointResolverV2 and
- // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom
- // endpoint, set the client option BaseEndpoint instead.
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
EndpointResolver EndpointResolver
// Resolves the endpoint used for a particular service operation. This should be
@@ -70,17 +72,20 @@ type Options struct {
// RetryMaxAttempts specifies the maximum number of attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. If specified in an operation call's
- // functional options with a value that is different than the constructed client's
- // Options, the Client's Retryer will be wrapped to use the operation's specific
- // RetryMaxAttempts value.
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
RetryMaxAttempts int
// RetryMode specifies the retry mode the API client will be created with, if
- // Retryer option is not also specified. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. Currently does not support per operation call
- // overrides, may in the future.
+ // Retryer option is not also specified.
+ //
+ // When creating a new API client, this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently does not support per operation call overrides, may in the future.
RetryMode aws.RetryMode
// Retryer guides how HTTP requests should be retried in case of recoverable
@@ -97,8 +102,9 @@ type Options struct {
// The initial DefaultsMode used when the client options were constructed. If the
// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
- // value was at that point in time. Currently does not support per operation call
- // overrides, may in the future.
+ // value was at that point in time.
+ //
+ // Currently does not support per operation call overrides, may in the future.
resolvedDefaultsMode aws.DefaultsMode
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
@@ -143,6 +149,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
// this field will likely prevent you from using any endpoint-related service
// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
// To migrate an EndpointResolver implementation that uses a custom endpoint, set
// the client option BaseEndpoint instead.
func WithEndpointResolver(v EndpointResolver) func(*Options) {
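
As a rough illustration of the options described above (imports as in the earlier sketch),
the endpoint and retry settings can be set when the client is constructed, and
RetryMaxAttempts can also be overridden per call through the operation's functional
options; the endpoint URL below is a placeholder.

    func newTunedClient(cfg aws.Config) *sts.Client {
        return sts.NewFromConfig(cfg, func(o *sts.Options) {
            o.BaseEndpoint = aws.String("https://sts.us-east-1.amazonaws.com") // replaces a custom EndpointResolver
            o.RetryMaxAttempts = 5
            o.RetryMode = aws.RetryModeAdaptive
        })
    }

    func whoAmI(ctx context.Context, client *sts.Client) (*sts.GetCallerIdentityOutput, error) {
        // The per-call override wraps the client's Retryer with this operation's value.
        return client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{},
            func(o *sts.Options) { o.RetryMaxAttempts = 2 })
    }
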
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
index 097875b279..9573a4b646 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
@@ -65,9 +65,10 @@ func (e *IDPCommunicationErrorException) ErrorCode() string {
func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The identity provider (IdP) reported that authentication failed. This might be
-// because the claim is invalid. If this error is returned for the
-// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired
-// or has been explicitly revoked.
+// because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it can
+// also mean that the claim has expired or has been explicitly revoked.
type IDPRejectedClaimException struct {
Message *string
@@ -183,11 +184,13 @@ func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { retu
// compresses the session policy document, session policy ARNs, and session tags
// into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper size
-// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide. You could receive this error even though you meet other
-// defined session policy and session tag limits. For more information, see IAM
-// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
+// limit. For more information, see [Passing Session Tags in STS] in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see [IAM and STS Entity Character Limits] in the IAM User Guide.
+//
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length
type PackedPolicyTooLargeException struct {
Message *string
@@ -215,9 +218,10 @@ func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
+// console to activate STS in that region. For more information, see [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region] in the IAM
+// User Guide.
+//
+// [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html
type RegionDisabledException struct {
Message *string
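
A small sketch of reacting to the typed errors above with errors.As (imports as in the
earlier sketch, plus "errors" and the sts types package); the remediation strings are
only illustrative.

    func classifySTSError(err error) string {
        var regionDisabled *types.RegionDisabledException
        var policyTooLarge *types.PackedPolicyTooLargeException
        switch {
        case errors.As(err, &regionDisabled):
            return "activate STS in the requested Region for this account"
        case errors.As(err, &policyTooLarge):
            return "trim session policies or session tags; the message reports how close they are to the limit"
        default:
            return "unhandled STS error"
        }
    }
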
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
index e3701d11d1..dff7a3c2e7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
@@ -11,10 +11,11 @@ import (
// returns.
type AssumedRoleUser struct {
- // The ARN of the temporary security credentials that are returned from the
- // AssumeRole action. For more information about ARNs and how to use them in
- // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
- // in the IAM User Guide.
+ // The ARN of the temporary security credentials that are returned from the AssumeRole
+ // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers] in
+ // the IAM User Guide.
+ //
+ // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html
//
// This member is required.
Arn *string
@@ -61,8 +62,9 @@ type FederatedUser struct {
// The ARN that specifies the federated user that is associated with the
// credentials. For more information about ARNs and how to use them in policies,
- // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
- // in the IAM User Guide.
+ // see [IAM Identifiers] in the IAM User Guide.
+ //
+ // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html
//
// This member is required.
Arn *string
@@ -81,9 +83,10 @@ type FederatedUser struct {
type PolicyDescriptorType struct {
// The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
- // policy for the role. For more information about ARNs, see Amazon Resource Names
- // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference.
+ // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces] in the Amazon Web
+ // Services General Reference.
+ //
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
Arn *string
noSmithyDocumentSerde
@@ -107,23 +110,30 @@ type ProvidedContext struct {
// You can pass custom key-value pair attributes when you assume a role or
// federate a user. These are called session tags. You can then use the session
-// tags to control access to resources. For more information, see Tagging Amazon
-// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
+// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions] in the IAM User
+// Guide.
+//
+// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
type Tag struct {
- // The key for a session tag. You can pass up to 50 session tags. The plain text
- // session tag keys can’t exceed 128 characters. For these and additional limits,
- // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide.
+ // The key for a session tag.
+ //
+ // You can pass up to 50 session tags. The plain text session tag keys can’t
+ // exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits] in the IAM User
+ // Guide.
+ //
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
//
// This member is required.
Key *string
- // The value for a session tag. You can pass up to 50 session tags. The plain text
- // session tag values can’t exceed 256 characters. For these and additional limits,
- // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide.
+ // The value for a session tag.
+ //
+ // You can pass up to 50 session tags. The plain text session tag values can’t
+ // exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits] in the IAM User
+ // Guide.
+ //
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
//
// This member is required.
Value *string
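
A hedged sketch of how the Tag and PolicyDescriptorType values above are typically passed
on an AssumeRole call (imports as in the earlier sketches); the role ARN and tag values
are placeholders.

    func assumeRoleWithTags(ctx context.Context, client *sts.Client) (*sts.AssumeRoleOutput, error) {
        return client.AssumeRole(ctx, &sts.AssumeRoleInput{
            RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder role
            RoleSessionName: aws.String("tagged-session"),
            // Session policy ARNs further restrict the returned credentials.
            PolicyArns: []types.PolicyDescriptorType{
                {Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
            },
            // Up to 50 tags; keys up to 128 characters, values up to 256.
            Tags: []types.Tag{
                {Key: aws.String("Project"), Value: aws.String("analytics")},
            },
        })
    }
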
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index efc6ff5fdf..c6e803286d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -74,7 +74,9 @@ const (
)
// AWS ISOE (Europe) partition's regions.
-const ()
+const (
+ EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West.
+)
// AWS ISOF partition's regions.
const ()
@@ -298,6 +300,12 @@ var awsPartition = partition{
endpointKey{
Region: "ca-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -331,6 +339,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -807,6 +824,12 @@ var awsPartition = partition{
},
"airflow": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -831,6 +854,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -840,6 +866,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -849,6 +878,9 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -1045,6 +1077,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -1057,6 +1092,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -4798,9 +4839,15 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
endpointKey{
Region: "bedrock-ap-northeast-1",
}: endpoint{
@@ -4809,6 +4856,14 @@ var awsPartition = partition{
Region: "ap-northeast-1",
},
},
+ endpointKey{
+ Region: "bedrock-ap-south-1",
+ }: endpoint{
+ Hostname: "bedrock.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
endpointKey{
Region: "bedrock-ap-southeast-1",
}: endpoint{
@@ -4817,6 +4872,14 @@ var awsPartition = partition{
Region: "ap-southeast-1",
},
},
+ endpointKey{
+ Region: "bedrock-ap-southeast-2",
+ }: endpoint{
+ Hostname: "bedrock.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
endpointKey{
Region: "bedrock-eu-central-1",
}: endpoint{
@@ -4825,6 +4888,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "bedrock-eu-west-1",
+ }: endpoint{
+ Hostname: "bedrock.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
endpointKey{
Region: "bedrock-eu-west-3",
}: endpoint{
@@ -4857,6 +4928,14 @@ var awsPartition = partition{
Region: "ap-northeast-1",
},
},
+ endpointKey{
+ Region: "bedrock-runtime-ap-south-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
endpointKey{
Region: "bedrock-runtime-ap-southeast-1",
}: endpoint{
@@ -4865,6 +4944,14 @@ var awsPartition = partition{
Region: "ap-southeast-1",
},
},
+ endpointKey{
+ Region: "bedrock-runtime-ap-southeast-2",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
endpointKey{
Region: "bedrock-runtime-eu-central-1",
}: endpoint{
@@ -4873,6 +4960,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "bedrock-runtime-eu-west-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
endpointKey{
Region: "bedrock-runtime-eu-west-3",
}: endpoint{
@@ -4932,6 +5027,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
@@ -4959,6 +5057,9 @@ var awsPartition = partition{
},
"braket": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
@@ -6886,6 +6987,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -6913,6 +7017,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -7025,6 +7132,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -7052,6 +7162,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -7941,6 +8054,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -13576,6 +13707,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "fms-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -13702,6 +13842,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "fms-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-central-1",
}: endpoint{
@@ -14087,6 +14236,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "fsx-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -14120,6 +14278,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-prod-ca-central-1",
}: endpoint{
@@ -14129,6 +14296,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-prod-ca-west-1",
+ }: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-prod-us-east-1",
}: endpoint{
@@ -14228,6 +14404,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "prod-ca-west-1",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "prod-ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "prod-us-east-1",
}: endpoint{
@@ -15252,13 +15446,6 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "honeycode": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
"iam": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -17079,6 +17266,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "kafka-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kafka-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -17112,6 +17308,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "kafka-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -19148,6 +19353,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -25379,6 +25587,12 @@ var awsPartition = partition{
},
"resource-explorer-2": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -25391,6 +25605,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -25400,15 +25617,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -25418,6 +25650,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -25929,6 +26167,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -25938,18 +26179,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -25959,6 +26209,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -26743,6 +26996,44 @@ var awsPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "s3-control.af-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.af-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "s3-control.ap-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
endpointKey{
Region: "ap-northeast-1",
}: endpoint{
@@ -26819,6 +27110,25 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "s3-control.ap-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -26857,6 +27167,44 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "s3-control.ap-southeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "s3-control.ap-southeast-4.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -26906,6 +27254,55 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "s3-control.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -26925,6 +27322,25 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "s3-control.eu-central-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.eu-central-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -26944,6 +27360,44 @@ var awsPartition = partition{
Region: "eu-north-1",
},
},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "s3-control.eu-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.eu-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "s3-control.eu-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.eu-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -27001,6 +27455,63 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "s3-control.il-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.il-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "s3-control.me-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.me-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "s3-control.me-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.me-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{
@@ -28313,21 +28824,85 @@ var awsPartition = partition{
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"servicecatalog": service{
@@ -30037,6 +30612,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -32594,6 +33172,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "transfer-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "transfer-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -32627,6 +33214,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "transfer-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -32859,6 +33455,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -32892,6 +33497,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -32928,6 +33542,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -33132,6 +33749,12 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -34164,6 +34787,23 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "wafv2.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -34408,6 +35048,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "wafv2-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-central-1",
}: endpoint{
@@ -36009,6 +36658,19 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "entitlement.marketplace": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"es": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -36446,7 +37108,7 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
- Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn",
+ Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-northwest-1",
},
@@ -38174,13 +38836,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
+ Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
Protocols: []string{"http", "https"},
+
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
+ Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
Protocols: []string{"http", "https"},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+
+ Deprecated: boxedTrue,
+ },
},
},
"backup": service{
@@ -38894,9 +39588,39 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"data-ats.iot": service{
@@ -39776,6 +40500,15 @@ var awsusgovPartition = partition{
},
"email": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "email-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@@ -39785,6 +40518,15 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "email-fips.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -39798,22 +40540,82 @@ var awsusgovPartition = partition{
},
"emr-containers": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-west-1.amazonaws.com",
+ },
},
},
"emr-serverless": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-west-1.amazonaws.com",
+ },
},
},
"es": service{
@@ -41214,6 +42016,16 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
+ "license-manager-user-subscriptions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"logs": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -43878,6 +44690,46 @@ var awsusgovPartition = partition{
},
},
},
+ "verifiedpermissions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"waf-regional": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -45188,6 +46040,114 @@ var awsisoPartition = partition{
},
},
},
+ "s3-control": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Hostname: "s3-control.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{
+ Hostname: "s3-control.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
"s3-outposts": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -45220,6 +46180,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"sns": service{
@@ -45738,6 +46701,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "firehose": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"glacier": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -46058,6 +47028,65 @@ var awsisobPartition = partition{
},
},
},
+ "s3-control": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{
+ Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
"s3-outposts": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -46255,7 +47284,11 @@ var awsisoePartition = partition{
SignatureVersions: []string{"v4"},
},
},
- Regions: regions{},
+ Regions: regions{
+ "eu-isoe-west-1": region{
+ Description: "EU ISOE West",
+ },
+ },
Services: services{},
}
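
One way to sanity-check the regenerated endpoint metadata above from application code is to
walk the partitions exposed by the v1 endpoints package (imports: "fmt" and
"github.com/aws/aws-sdk-go/aws/endpoints"); this sketch assumes the new EuIsoeWest1RegionID
constant is surfaced through DefaultPartitions.

    func printEuIsoePartition() {
        for _, p := range endpoints.DefaultPartitions() {
            for id := range p.Regions() {
                if id == endpoints.EuIsoeWest1RegionID {
                    fmt.Printf("partition %s contains region %s\n", p.ID(), id)
                }
            }
        }
    }
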
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index aa917adc02..1c4fe84e0f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.51.17"
+const SDKVersion = "1.53.11"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 058334053c..2ca0b19db7 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri
}
func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
- // If it's empty, generate an empty value
- if !value.IsNil() && value.Len() == 0 {
+ // If it's empty, and not ec2, generate an empty value
+ if !value.IsNil() && value.Len() == 0 && !q.isEC2 {
v.Set(prefix, "")
return nil
}
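The queryutil change above stops emitting an empty query key for empty, non-nil lists when the protocol is EC2. A standalone sketch (not SDK code) of the same guard, for illustration only:

// Illustrative sketch of the updated parseList guard: a non-nil empty list
// still serializes as an empty key for non-EC2 query APIs, but is omitted
// entirely for EC2.
package main

import (
	"fmt"
	"net/url"
)

func serializeList(v url.Values, prefix string, list []string, isEC2 bool) {
	if list != nil && len(list) == 0 && !isEC2 {
		v.Set(prefix, "") // encodes as e.g. "InstanceId="
		return
	}
	for i, item := range list {
		v.Set(fmt.Sprintf("%s.%d", prefix, i+1), item)
	}
}

func main() {
	empty := []string{}
	nonEC2, ec2Style := url.Values{}, url.Values{}
	serializeList(nonEC2, "InstanceId", empty, false)
	serializeList(ec2Style, "InstanceId", empty, true)
	fmt.Printf("non-ec2: %q  ec2: %q\n", nonEC2.Encode(), ec2Style.Encode())
}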
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
index cedc4d5ba5..687ca0252e 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
@@ -4006,6 +4006,11 @@ func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput
// enters the cancelled_running state and the instances continue to run until
// they are interrupted or you terminate them manually.
//
+// Restrictions
+//
+// - You can delete up to 100 fleets in a single request. If you exceed the
+// specified number, no fleets are deleted.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -5318,10 +5323,10 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ
// that you specify 2. Broadcast and multicast are not supported. For more
// information about NetBIOS node types, see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt).
//
-// - ipv6-preferred-lease-time - A value (in seconds, minutes, hours, or
-// years) for how frequently a running instance with an IPv6 assigned to
-// it goes through DHCPv6 lease renewal. Acceptable values are between 140
-// and 2147483647 seconds (approximately 68 years). If no value is entered,
+// - ipv6-address-preferred-lease-time - A value (in seconds, minutes, hours,
+// or years) for how frequently a running instance with an IPv6 assigned
+// to it goes through DHCPv6 lease renewal. Acceptable values are between
+// 140 and 2147483647 seconds (approximately 68 years). If no value is entered,
// the default lease time is 140 seconds. If you use long-term addressing
// for EC2 instances, you can increase the lease time and avoid frequent
// lease renewal requests. Lease renewal typically occurs when half of the
@@ -6548,10 +6553,10 @@ func (c *EC2) CreateLaunchTemplateRequest(input *CreateLaunchTemplateInput) (req
// see Launch an instance from a launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
-// If you want to clone an existing launch template as the basis for creating
-// a new launch template, you can use the Amazon EC2 console. The API, SDKs,
-// and CLI do not support cloning a template. For more information, see Create
-// a launch template from an existing launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template-from-existing-launch-template)
+// To clone an existing launch template as the basis for a new launch template,
+// use the Amazon EC2 console. The API, SDKs, and CLI do not support cloning
+// a template. For more information, see Create a launch template from an existing
+// launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template-from-existing-launch-template)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -6625,15 +6630,17 @@ func (c *EC2) CreateLaunchTemplateVersionRequest(input *CreateLaunchTemplateVers
// CreateLaunchTemplateVersion API operation for Amazon Elastic Compute Cloud.
//
-// Creates a new version of a launch template. You can specify an existing version
-// of launch template from which to base the new version.
+// Creates a new version of a launch template. You must specify an existing
+// launch template, either by name or ID. You can determine whether the new
+// version inherits parameters from a source version, and add or overwrite parameters
+// as needed.
//
// Launch template versions are numbered in the order in which they are created.
-// You cannot specify, change, or replace the numbering of launch template versions.
+// You can't specify, change, or replace the numbering of launch template versions.
//
// Launch templates are immutable; after you create a launch template, you can't
// modify it. Instead, you can create a new version of the launch template that
-// includes any changes you require.
+// includes the changes that you require.
//
// For more information, see Modify a launch template (manage launch template
// versions) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#manage-launch-template-versions)
@@ -11804,17 +11811,22 @@ func (c *EC2) DeleteFleetsRequest(input *DeleteFleetsInput) (req *request.Reques
// manually.
//
// For instant fleets, EC2 Fleet must terminate the instances when the fleet
-// is deleted. A deleted instant fleet with running instances is not supported.
+// is deleted. Up to 1000 instances can be terminated in a single request to
+// delete instant fleets. A deleted instant fleet with running instances is
+// not supported.
//
// Restrictions
//
-// - You can delete up to 25 instant fleets in a single request. If you exceed
-// this number, no instant fleets are deleted and an error is returned. There
-// is no restriction on the number of fleets of type maintain or request
-// that can be deleted in a single request.
+// - You can delete up to 25 fleets of type instant in a single request.
+//
+// - You can delete up to 100 fleets of type maintain or request in a single
+// request.
+//
+// - You can delete up to 125 fleets in a single request, provided you do
+// not exceed the quota for each fleet type, as specified above.
//
-// - Up to 1000 instances can be terminated in a single request to delete
-// instant fleets.
+// - If you exceed the specified number of fleets to delete, no fleets are
+// deleted.
//
// For more information, see Delete an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#delete-fleet)
// in the Amazon EC2 User Guide.
@@ -15561,9 +15573,10 @@ func (c *EC2) DeleteTransitGatewayRouteTableRequest(input *DeleteTransitGatewayR
// DeleteTransitGatewayRouteTable API operation for Amazon Elastic Compute Cloud.
//
-// Deletes the specified transit gateway route table. You must disassociate
-// the route table from any transit gateway route tables before you can delete
-// it.
+// Deletes the specified transit gateway route table. If there are any route
+// tables associated with the transit gateway route table, you must first run
+// DisassociateRouteTable before you can delete the transit gateway route table.
+// This removes any route tables associated with the transit gateway route table.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -22199,6 +22212,9 @@ func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re
// AMI are terminated, specifying the ID of the image will eventually return
// an error indicating that the AMI ID cannot be found.
//
+// We strongly recommend using only paginated requests. Unpaginated requests
+// are susceptible to throttling and timeouts.
+//
// The order of the elements in the response, including those within nested
// structures, might vary. Applications should not assume the elements appear
// in a particular order.
@@ -23467,9 +23483,9 @@ func (c *EC2) DescribeInstanceTypeOfferingsRequest(input *DescribeInstanceTypeOf
// DescribeInstanceTypeOfferings API operation for Amazon Elastic Compute Cloud.
//
-// Returns a list of all instance types offered. The results can be filtered
-// by location (Region or Availability Zone). If no location is specified, the
-// instance types offered in the current Region are returned.
+// Lists the instance types that are offered for the specified location. If
+// no location is specified, the default is to list the instance types that
+// are offered in the current Region.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -23599,8 +23615,8 @@ func (c *EC2) DescribeInstanceTypesRequest(input *DescribeInstanceTypesInput) (r
// DescribeInstanceTypes API operation for Amazon Elastic Compute Cloud.
//
-// Describes the details of the instance types that are offered in a location.
-// The results can be filtered by the attributes of the instance types.
+// Describes the specified instance types. By default, all instance types for
+// the current Region are described. Alternatively, you can filter the results.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -23751,6 +23767,9 @@ func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *requ
// If you describe instances and specify only instance IDs that are in an unaffected
// zone, the call works normally.
//
+// We strongly recommend using only paginated requests. Unpaginated requests
+// are susceptible to throttling and timeouts.
+//
// The order of the elements in the response, including those within nested
// structures, might vary. Applications should not assume the elements appear
// in a particular order.
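Several hunks in this file add the same guidance: prefer paginated Describe* calls to avoid throttling and timeouts. A hedged sketch of what that looks like with the SDK's existing paginator helpers; credentials, region, and error handling are simplified, and the import path follows the vendored module path:

// Minimal paginated DescribeInstances call, as recommended by the guidance
// added above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	input := &ec2.DescribeInstancesInput{MaxResults: aws.Int64(100)}
	err := svc.DescribeInstancesPages(input,
		func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
			for _, res := range page.Reservations {
				for _, inst := range res.Instances {
					fmt.Println(aws.StringValue(inst.InstanceId))
				}
			}
			return true // keep requesting pages until NextToken is exhausted
		})
	if err != nil {
		fmt.Println("DescribeInstances failed:", err)
	}
}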
@@ -27463,6 +27482,9 @@ func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesI
// you use pagination or one of the following filters: group-id, mac-address,
// private-dns-name, private-ip-address, private-dns-name, subnet-id, or vpc-id.
//
+// We strongly recommend using only paginated requests. Unpaginated requests
+// are susceptible to throttling and timeouts.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -29749,6 +29771,9 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ
// For more information about EBS snapshots, see Amazon EBS snapshots (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-snapshots.html)
// in the Amazon EBS User Guide.
//
+// We strongly recommend using only paginated requests. Unpaginated requests
+// are susceptible to throttling and timeouts.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -30939,6 +30964,9 @@ func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Reques
// For more information about tags, see Tag your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
+// We strongly recommend using only paginated requests. Unpaginated requests
+// are susceptible to throttling and timeouts.
+//
// The order of the elements in the response, including those within nested
// structures, might vary. Applications should not assume the elements appear
// in a particular order.
@@ -33803,6 +33831,9 @@ func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.
// For more information about EBS volumes, see Amazon EBS volumes (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volumes.html)
// in the Amazon EBS User Guide.
//
+// We strongly recommend using only paginated requests. Unpaginated requests
+// are susceptible to throttling and timeouts.
+//
// The order of the elements in the response, including those within nested
// structures, might vary. Applications should not assume the elements appear
// in a particular order.
@@ -36560,6 +36591,87 @@ func (c *EC2) DisableImageDeprecationWithContext(ctx aws.Context, input *Disable
return out, req.Send()
}
+const opDisableImageDeregistrationProtection = "DisableImageDeregistrationProtection"
+
+// DisableImageDeregistrationProtectionRequest generates a "aws/request.Request" representing the
+// client's request for the DisableImageDeregistrationProtection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DisableImageDeregistrationProtection for more information on using the DisableImageDeregistrationProtection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DisableImageDeregistrationProtectionRequest method.
+// req, resp := client.DisableImageDeregistrationProtectionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageDeregistrationProtection
+func (c *EC2) DisableImageDeregistrationProtectionRequest(input *DisableImageDeregistrationProtectionInput) (req *request.Request, output *DisableImageDeregistrationProtectionOutput) {
+ op := &request.Operation{
+ Name: opDisableImageDeregistrationProtection,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DisableImageDeregistrationProtectionInput{}
+ }
+
+ output = &DisableImageDeregistrationProtectionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DisableImageDeregistrationProtection API operation for Amazon Elastic Compute Cloud.
+//
+// Disables deregistration protection for an AMI. When deregistration protection
+// is disabled, the AMI can be deregistered.
+//
+// If you chose to include a 24-hour cooldown period when you enabled deregistration
+// protection for the AMI, then, when you disable deregistration protection,
+// you won’t immediately be able to deregister the AMI.
+//
+// For more information, see Protect an AMI from deregistration (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/deregister-ami.html#ami-deregistration-protection)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation DisableImageDeregistrationProtection for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageDeregistrationProtection
+func (c *EC2) DisableImageDeregistrationProtection(input *DisableImageDeregistrationProtectionInput) (*DisableImageDeregistrationProtectionOutput, error) {
+ req, out := c.DisableImageDeregistrationProtectionRequest(input)
+ return out, req.Send()
+}
+
+// DisableImageDeregistrationProtectionWithContext is the same as DisableImageDeregistrationProtection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisableImageDeregistrationProtection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) DisableImageDeregistrationProtectionWithContext(ctx aws.Context, input *DisableImageDeregistrationProtectionInput, opts ...request.Option) (*DisableImageDeregistrationProtectionOutput, error) {
+ req, out := c.DisableImageDeregistrationProtectionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
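A hedged usage sketch for the new DisableImageDeregistrationProtection operation added above, using the context-aware variant; the AMI ID is a placeholder and configuration is left to the default credential chain:

// Illustrative only: disable deregistration protection for an AMI.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := svc.DisableImageDeregistrationProtectionWithContext(ctx,
		&ec2.DisableImageDeregistrationProtectionInput{
			ImageId: aws.String("ami-0123456789abcdef0"), // placeholder
		})
	if err != nil {
		fmt.Println("disable failed:", err)
		return
	}
	fmt.Println("return:", aws.StringValue(out.Return))
}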
const opDisableIpamOrganizationAdminAccount = "DisableIpamOrganizationAdminAccount"
// DisableIpamOrganizationAdminAccountRequest generates a "aws/request.Request" representing the
@@ -38893,6 +39005,86 @@ func (c *EC2) EnableImageDeprecationWithContext(ctx aws.Context, input *EnableIm
return out, req.Send()
}
+const opEnableImageDeregistrationProtection = "EnableImageDeregistrationProtection"
+
+// EnableImageDeregistrationProtectionRequest generates a "aws/request.Request" representing the
+// client's request for the EnableImageDeregistrationProtection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See EnableImageDeregistrationProtection for more information on using the EnableImageDeregistrationProtection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the EnableImageDeregistrationProtectionRequest method.
+// req, resp := client.EnableImageDeregistrationProtectionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageDeregistrationProtection
+func (c *EC2) EnableImageDeregistrationProtectionRequest(input *EnableImageDeregistrationProtectionInput) (req *request.Request, output *EnableImageDeregistrationProtectionOutput) {
+ op := &request.Operation{
+ Name: opEnableImageDeregistrationProtection,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &EnableImageDeregistrationProtectionInput{}
+ }
+
+ output = &EnableImageDeregistrationProtectionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// EnableImageDeregistrationProtection API operation for Amazon Elastic Compute Cloud.
+//
+// Enables deregistration protection for an AMI. When deregistration protection
+// is enabled, the AMI can't be deregistered.
+//
+// To allow the AMI to be deregistered, you must first disable deregistration
+// protection using DisableImageDeregistrationProtection.
+//
+// For more information, see Protect an AMI from deregistration (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/deregister-ami.html#ami-deregistration-protection)
+// in the Amazon EC2 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation EnableImageDeregistrationProtection for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageDeregistrationProtection
+func (c *EC2) EnableImageDeregistrationProtection(input *EnableImageDeregistrationProtectionInput) (*EnableImageDeregistrationProtectionOutput, error) {
+ req, out := c.EnableImageDeregistrationProtectionRequest(input)
+ return out, req.Send()
+}
+
+// EnableImageDeregistrationProtectionWithContext is the same as EnableImageDeregistrationProtection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See EnableImageDeregistrationProtection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) EnableImageDeregistrationProtectionWithContext(ctx aws.Context, input *EnableImageDeregistrationProtectionInput, opts ...request.Option) (*EnableImageDeregistrationProtectionOutput, error) {
+ req, out := c.EnableImageDeregistrationProtectionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
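And the counterpart for EnableImageDeregistrationProtection, showing the optional cooldown documented above; again a hedged sketch with placeholder values:

// Illustrative only: enable deregistration protection with the 24-hour cooldown.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.EnableImageDeregistrationProtection(&ec2.EnableImageDeregistrationProtectionInput{
		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder
		WithCooldown: aws.Bool(true),                      // deregistration stays blocked for 24 hours after protection is later disabled
	})
	if err != nil {
		fmt.Println("enable failed:", err)
		return
	}
	fmt.Println("return:", aws.StringValue(out.Return))
}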
const opEnableIpamOrganizationAdminAccount = "EnableIpamOrganizationAdminAccount"
// EnableIpamOrganizationAdminAccountRequest generates a "aws/request.Request" representing the
@@ -40523,6 +40715,9 @@ func (c *EC2) GetConsoleScreenshotRequest(input *GetConsoleScreenshotInput) (req
//
// The returned content is Base64-encoded.
//
+// For more information, see Instance console output (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/troubleshoot-unreachable-instance.html#instance-console-console-output)
+// in the Amazon EC2 User Guide.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -41235,6 +41430,80 @@ func (c *EC2) GetInstanceMetadataDefaultsWithContext(ctx aws.Context, input *Get
return out, req.Send()
}
+const opGetInstanceTpmEkPub = "GetInstanceTpmEkPub"
+
+// GetInstanceTpmEkPubRequest generates a "aws/request.Request" representing the
+// client's request for the GetInstanceTpmEkPub operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetInstanceTpmEkPub for more information on using the GetInstanceTpmEkPub
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetInstanceTpmEkPubRequest method.
+// req, resp := client.GetInstanceTpmEkPubRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetInstanceTpmEkPub
+func (c *EC2) GetInstanceTpmEkPubRequest(input *GetInstanceTpmEkPubInput) (req *request.Request, output *GetInstanceTpmEkPubOutput) {
+ op := &request.Operation{
+ Name: opGetInstanceTpmEkPub,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetInstanceTpmEkPubInput{}
+ }
+
+ output = &GetInstanceTpmEkPubOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetInstanceTpmEkPub API operation for Amazon Elastic Compute Cloud.
+//
+// Gets the public endorsement key associated with the Nitro Trusted Platform
+// Module (NitroTPM) for the specified instance.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation GetInstanceTpmEkPub for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetInstanceTpmEkPub
+func (c *EC2) GetInstanceTpmEkPub(input *GetInstanceTpmEkPubInput) (*GetInstanceTpmEkPubOutput, error) {
+ req, out := c.GetInstanceTpmEkPubRequest(input)
+ return out, req.Send()
+}
+
+// GetInstanceTpmEkPubWithContext is the same as GetInstanceTpmEkPub with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetInstanceTpmEkPub for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) GetInstanceTpmEkPubWithContext(ctx aws.Context, input *GetInstanceTpmEkPubInput, opts ...request.Option) (*GetInstanceTpmEkPubOutput, error) {
+ req, out := c.GetInstanceTpmEkPubRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
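A hedged sketch for the new GetInstanceTpmEkPub operation; the instance ID is a placeholder, "der" comes from the KeyFormat documentation, and "rsa-2048" is an assumed EkPubKeyType value:

// Illustrative only: fetch the NitroTPM public endorsement key for an instance.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.GetInstanceTpmEkPub(&ec2.GetInstanceTpmEkPubInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder
		KeyFormat:  aws.String("der"),                 // base64-encoded, OpenSSL-compatible DER
		KeyType:    aws.String("rsa-2048"),            // assumed EkPubKeyType value
	})
	if err != nil {
		fmt.Println("GetInstanceTpmEkPub failed:", err)
		return
	}
	fmt.Printf("format=%s type=%s keyBytes=%d\n",
		aws.StringValue(out.KeyFormat),
		aws.StringValue(out.KeyType),
		len(aws.StringValue(out.KeyValue))) // avoid printing the key material itself
}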
const opGetInstanceTypesFromInstanceRequirements = "GetInstanceTypesFromInstanceRequirements"
// GetInstanceTypesFromInstanceRequirementsRequest generates a "aws/request.Request" representing the
@@ -47291,9 +47560,9 @@ func (c *EC2) ModifyInstanceMetadataDefaultsRequest(input *ModifyInstanceMetadat
// level in the specified Amazon Web Services Region.
//
// To remove a parameter's account-level default setting, specify no-preference.
-// At instance launch, the value will come from the AMI, or from the launch
-// parameter if specified. For more information, see Order of precedence for
-// instance metadata options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html#instance-metadata-options-order-of-precedence)
+// If an account-level setting is cleared with no-preference, then the instance
+// launch considers the other instance metadata settings. For more information,
+// see Order of precedence for instance metadata options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html#instance-metadata-options-order-of-precedence)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -62333,12 +62602,12 @@ type AssociatedRole struct {
// The name of the Amazon S3 bucket in which the Amazon S3 object is stored.
CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"`
- // The key of the Amazon S3 object ey where the certificate, certificate chain,
- // and encrypted private key bundle is stored. The object key is formated as
- // follows: role_arn/certificate_arn.
+ // The key of the Amazon S3 object where the certificate, certificate chain,
+ // and encrypted private key bundle are stored. The object key is formatted
+ // as follows: role_arn/certificate_arn.
CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"`
- // The ID of the KMS customer master key (CMK) used to encrypt the private key.
+ // The ID of the KMS key used to encrypt the private key.
EncryptionKmsKeyId *string `locationName:"encryptionKmsKeyId" type:"string"`
}
@@ -65615,6 +65884,8 @@ type CancelSpotFleetRequestsInput struct {
// The IDs of the Spot Fleet requests.
//
+ // Constraint: You can specify up to 100 IDs in a single request.
+ //
// SpotFleetRequestIds is a required field
SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"`
@@ -71421,11 +71692,22 @@ func (s *CreateCoipPoolOutput) SetCoipPool(v *CoipPool) *CreateCoipPoolOutput {
type CreateCustomerGatewayInput struct {
_ struct{} `type:"structure"`
- // For devices that support BGP, the customer gateway's BGP ASN.
+ // For customer gateway devices that support BGP, specify the device's ASN.
+ // You must specify either BgpAsn or BgpAsnExtended when creating the customer
+ // gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended.
//
// Default: 65000
+ //
+ // Valid values: 1 to 2,147,483,647
BgpAsn *int64 `type:"integer"`
+ // For customer gateway devices that support BGP, specify the device's ASN.
+ // You must specify either BgpAsn or BgpAsnExtended when creating the customer
+ // gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended.
+ //
+ // Valid values: 2,147,483,648 to 4,294,967,295
+ BgpAsnExtended *int64 `type:"long"`
+
// The Amazon Resource Name (ARN) for the customer gateway certificate.
CertificateArn *string `type:"string"`
@@ -71441,7 +71723,9 @@ type CreateCustomerGatewayInput struct {
DryRun *bool `locationName:"dryRun" type:"boolean"`
// IPv4 address for the customer gateway device's outside interface. The address
- // must be static.
+ // must be static. If OutsideIpAddressType in your VPN connection options is
+ // set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address.
+ // If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address.
IpAddress *string `type:"string"`
// This member has been deprecated. The Internet-routable IP address for the
@@ -71494,6 +71778,12 @@ func (s *CreateCustomerGatewayInput) SetBgpAsn(v int64) *CreateCustomerGatewayIn
return s
}
+// SetBgpAsnExtended sets the BgpAsnExtended field's value.
+func (s *CreateCustomerGatewayInput) SetBgpAsnExtended(v int64) *CreateCustomerGatewayInput {
+ s.BgpAsnExtended = &v
+ return s
+}
+
// SetCertificateArn sets the CertificateArn field's value.
func (s *CreateCustomerGatewayInput) SetCertificateArn(v string) *CreateCustomerGatewayInput {
s.CertificateArn = &v
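The new BgpAsnExtended field covers customer gateway devices with 4-byte ASNs above 2,147,483,647, which BgpAsn cannot express. A hedged sketch with placeholder values:

// Illustrative only: create a customer gateway for a device with a 4-byte ASN.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.CreateCustomerGateway(&ec2.CreateCustomerGatewayInput{
		BgpAsnExtended: aws.Int64(4200000000),      // above 2,147,483,647, so BgpAsn cannot be used
		IpAddress:      aws.String("203.0.113.12"), // placeholder static IPv4 address
		Type:           aws.String("ipsec.1"),
	})
	if err != nil {
		fmt.Println("CreateCustomerGateway failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.CustomerGateway.CustomerGatewayId))
}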
@@ -71954,11 +72244,11 @@ type CreateFleetError struct {
_ struct{} `type:"structure"`
// The error code that indicates why the instance could not be launched. For
- // more information about error codes, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
+ // more information about error codes, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
ErrorCode *string `locationName:"errorCode" type:"string"`
// The error message that describes why the instance could not be launched.
- // For more information about error messages, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html).
+ // For more information about error messages, see Error codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
ErrorMessage *string `locationName:"errorMessage" type:"string"`
// The launch templates and overrides that were used for launching the instances.
@@ -74389,14 +74679,14 @@ type CreateLaunchTemplateVersionInput struct {
// The ID of the launch template.
//
- // You must specify either the LaunchTemplateId or the LaunchTemplateName, but
- // not both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateId *string `type:"string"`
// The name of the launch template.
//
- // You must specify the LaunchTemplateName or the LaunchTemplateId, but not
- // both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateName *string `min:"3" type:"string"`
// If true, and if a Systems Manager parameter is specified for ImageId, the
@@ -74407,11 +74697,17 @@ type CreateLaunchTemplateVersionInput struct {
// Default: false
ResolveAlias *bool `type:"boolean"`
- // The version number of the launch template version on which to base the new
- // version. The new version inherits the same launch parameters as the source
- // version, except for parameters that you specify in LaunchTemplateData. Snapshots
+ // The version of the launch template on which to base the new version. Snapshots
// applied to the block device mapping are ignored when creating a new version
// unless they are explicitly included.
+ //
+ // If you specify this parameter, the new version inherits the launch parameters
+ // from the source version. If you specify additional launch parameters for
+ // the new version, they overwrite any corresponding launch parameters inherited
+ // from the source version.
+ //
+ // If you omit this parameter, the new version contains only the launch parameters
+ // that you specify for the new version.
SourceVersion *string `type:"string"`
// A description for the version of the launch template.
@@ -83095,10 +83391,18 @@ func (s *CreditSpecificationRequest) SetCpuCredits(v string) *CreditSpecificatio
type CustomerGateway struct {
_ struct{} `type:"structure"`
- // The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number
- // (ASN).
+ // The customer gateway device's Border Gateway Protocol (BGP) Autonomous System
+ // Number (ASN).
+ //
+ // Valid values: 1 to 2,147,483,647
BgpAsn *string `locationName:"bgpAsn" type:"string"`
+ // The customer gateway device's Border Gateway Protocol (BGP) Autonomous System
+ // Number (ASN).
+ //
+ // Valid values: 2,147,483,648 to 4,294,967,295
+ BgpAsnExtended *string `locationName:"bgpAsnExtended" type:"string"`
+
// The Amazon Resource Name (ARN) for the customer gateway certificate.
CertificateArn *string `locationName:"certificateArn" type:"string"`
@@ -83108,7 +83412,10 @@ type CustomerGateway struct {
// The name of customer gateway device.
DeviceName *string `locationName:"deviceName" type:"string"`
- // The IP address of the customer gateway device's outside interface.
+ // IPv4 address for the customer gateway device's outside interface. The address
+ // must be static. If OutsideIpAddressType in your VPN connection options is
+ // set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address.
+ // If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address.
IpAddress *string `locationName:"ipAddress" type:"string"`
// The current state of the customer gateway (pending | available | deleting
@@ -83146,6 +83453,12 @@ func (s *CustomerGateway) SetBgpAsn(v string) *CustomerGateway {
return s
}
+// SetBgpAsnExtended sets the BgpAsnExtended field's value.
+func (s *CustomerGateway) SetBgpAsnExtended(v string) *CustomerGateway {
+ s.BgpAsnExtended = &v
+ return s
+}
+
// SetCertificateArn sets the CertificateArn field's value.
func (s *CustomerGateway) SetCertificateArn(v string) *CustomerGateway {
s.CertificateArn = &v
@@ -84237,6 +84550,9 @@ type DeleteFleetsInput struct {
// The IDs of the EC2 Fleets.
//
+ // Constraints: In a single request, you can specify up to 25 instant fleet
+ // IDs and up to 100 maintain or request fleet IDs.
+ //
// FleetIds is a required field
FleetIds []*string `locationName:"FleetId" type:"list" required:"true"`
@@ -85291,14 +85607,14 @@ type DeleteLaunchTemplateInput struct {
// The ID of the launch template.
//
- // You must specify either the LaunchTemplateId or the LaunchTemplateName, but
- // not both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateId *string `type:"string"`
// The name of the launch template.
//
- // You must specify either the LaunchTemplateName or the LaunchTemplateId, but
- // not both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateName *string `min:"3" type:"string"`
}
@@ -85393,14 +85709,14 @@ type DeleteLaunchTemplateVersionsInput struct {
// The ID of the launch template.
//
- // You must specify either the LaunchTemplateId or the LaunchTemplateName, but
- // not both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateId *string `type:"string"`
// The name of the launch template.
//
- // You must specify either the LaunchTemplateName or the LaunchTemplateId, but
- // not both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateName *string `min:"3" type:"string"`
// The version numbers of one or more launch template versions to delete. You
@@ -96821,6 +97137,9 @@ type DescribeImageAttributeOutput struct {
// The boot mode.
BootMode *AttributeValue `locationName:"bootMode" type:"structure"`
+ // Indicates whether deregistration protection is enabled for the AMI.
+ DeregistrationProtection *AttributeValue `locationName:"deregistrationProtection" type:"structure"`
+
// A description for the AMI.
Description *AttributeValue `locationName:"description" type:"structure"`
@@ -96900,6 +97219,12 @@ func (s *DescribeImageAttributeOutput) SetBootMode(v *AttributeValue) *DescribeI
return s
}
+// SetDeregistrationProtection sets the DeregistrationProtection field's value.
+func (s *DescribeImageAttributeOutput) SetDeregistrationProtection(v *AttributeValue) *DescribeImageAttributeOutput {
+ s.DeregistrationProtection = v
+ return s
+}
+
// SetDescription sets the Description field's value.
func (s *DescribeImageAttributeOutput) SetDescription(v *AttributeValue) *DescribeImageAttributeOutput {
s.Description = v
@@ -98574,14 +98899,26 @@ type DescribeInstanceTypeOfferingsInput struct {
// One or more filters. Filter names and values are case-sensitive.
//
- // * location - This depends on the location type. For example, if the location
- // type is region (default), the location is the Region code (for example,
- // us-east-2.)
+ // * instance-type - The instance type. For a list of possible values, see
+ // Instance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Instance.html).
//
- // * instance-type - The instance type. For example, c5.2xlarge.
+ // * location - The location. For a list of possible identifiers, see Regions
+ // and Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html).
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The location type.
+ //
+ // * availability-zone - The Availability Zone. When you specify a location
+ // filter, it must be an Availability Zone for the current Region.
+ //
+ // * availability-zone-id - The AZ ID. When you specify a location filter,
+ // it must be an AZ ID for the current Region.
+ //
+ // * outpost - The Outpost ARN. When you specify a location filter, it must
+ // be an Outpost ARN for the current Region.
+ //
+ // * region - The current Region. If you specify a location filter, it must
+ // match the current Region.
LocationType *string `type:"string" enum:"LocationType"`
// The maximum number of items to return for this request. To get the next page
@@ -98658,7 +98995,7 @@ func (s *DescribeInstanceTypeOfferingsInput) SetNextToken(v string) *DescribeIns
type DescribeInstanceTypeOfferingsOutput struct {
_ struct{} `type:"structure"`
- // The instance types offered.
+ // The instance types offered in the location.
InstanceTypeOfferings []*InstanceTypeOffering `locationName:"instanceTypeOfferingSet" locationNameList:"item" type:"list"`
// The token to include in another request to get the next page of items. This
@@ -98850,8 +99187,7 @@ type DescribeInstanceTypesInput struct {
// can be configured for the instance type. For example, "1" or "1,2".
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
- // The instance types. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
- // in the Amazon EC2 User Guide.
+ // The instance types.
InstanceTypes []*string `locationName:"InstanceType" type:"list" enum:"InstanceType"`
// The maximum number of items to return for this request. To get the next page
@@ -98928,8 +99264,7 @@ func (s *DescribeInstanceTypesInput) SetNextToken(v string) *DescribeInstanceTyp
type DescribeInstanceTypesOutput struct {
_ struct{} `type:"structure"`
- // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
- // in the Amazon EC2 User Guide.
+ // The instance type.
InstanceTypes []*InstanceTypeInfo `locationName:"instanceTypeSet" locationNameList:"item" type:"list"`
// The token to include in another request to get the next page of items. This
@@ -100661,7 +100996,8 @@ type DescribeLaunchTemplateVersionsInput struct {
// The ID of the launch template.
//
// To describe one or more versions of a specified launch template, you must
- // specify either the LaunchTemplateId or the LaunchTemplateName, but not both.
+ // specify either the launch template ID or the launch template name, but not
+ // both.
//
// To describe all the latest or default launch template versions in your account,
// you must omit this parameter.
@@ -100670,7 +101006,8 @@ type DescribeLaunchTemplateVersionsInput struct {
// The name of the launch template.
//
// To describe one or more versions of a specified launch template, you must
- // specify either the LaunchTemplateName or the LaunchTemplateId, but not both.
+ // specify either the launch template name or the launch template ID, but not
+ // both.
//
// To describe all the latest or default launch template versions in your account,
// you must omit this parameter.
@@ -103334,6 +103671,11 @@ func (s *DescribeNetworkInterfaceAttributeInput) SetNetworkInterfaceId(v string)
type DescribeNetworkInterfaceAttributeOutput struct {
_ struct{} `type:"structure"`
+ // Indicates whether to assign a public IPv4 address to a network interface.
+ // This option can be enabled for any network interface but will only apply
+ // to the primary network interface (eth0).
+ AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"`
+
// The attachment (if any) of the network interface.
Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"`
@@ -103368,6 +103710,12 @@ func (s DescribeNetworkInterfaceAttributeOutput) GoString() string {
return s.String()
}
+// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value.
+func (s *DescribeNetworkInterfaceAttributeOutput) SetAssociatePublicIpAddress(v bool) *DescribeNetworkInterfaceAttributeOutput {
+ s.AssociatePublicIpAddress = &v
+ return s
+}
+
// SetAttachment sets the Attachment field's value.
func (s *DescribeNetworkInterfaceAttributeOutput) SetAttachment(v *NetworkInterfaceAttachment) *DescribeNetworkInterfaceAttributeOutput {
s.Attachment = v
@@ -103729,7 +104077,7 @@ func (s *DescribeNetworkInterfacesInput) SetNextToken(v string) *DescribeNetwork
type DescribeNetworkInterfacesOutput struct {
_ struct{} `type:"structure"`
- // Information about one or more network interfaces.
+ // Information about the network interfaces.
NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`
// The token to include in another request to get the next page of items. This
@@ -107826,13 +108174,8 @@ type DescribeTagsInput struct {
//
// * resource-id - The ID of the resource.
//
- // * resource-type - The resource type (customer-gateway | dedicated-host
- // | dhcp-options | elastic-ip | fleet | fpga-image | host-reservation |
- // image | instance | internet-gateway | key-pair | launch-template | natgateway
- // | network-acl | network-interface | placement-group | reserved-instances
- // | route-table | security-group | snapshot | spot-instances-request | subnet
- // | volume | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection
- // | vpn-connection | vpn-gateway).
+ // * resource-type - The resource type. For a list of possible values, see
+ // TagSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TagSpecification.html).
//
// * tag: - The key/value combination of the tag. For example, specify
// "tag:Owner" for the filter name and "TeamA" for the filter value to find
@@ -109610,6 +109953,12 @@ type DescribeTransitGatewaysInput struct {
// | modifying | pending).
//
// * transit-gateway-id - The ID of the transit gateway.
+ //
+ // * tag-key - The key/value combination of a tag assigned to the resource.
+ // Use the tag key in the filter name and the tag value as the filter value.
+ // For example, to find all resources that have a tag with the key Owner
+ // and the value TeamA, specify tag:Owner for the filter name and TeamA for
+ // the filter value.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The maximum number of results to return with a single call. To retrieve the
@@ -114674,6 +115023,95 @@ func (s *DisableImageDeprecationOutput) SetReturn(v bool) *DisableImageDeprecati
return s
}
+type DisableImageDeregistrationProtectionInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the AMI.
+ //
+ // ImageId is a required field
+ ImageId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisableImageDeregistrationProtectionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisableImageDeregistrationProtectionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisableImageDeregistrationProtectionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisableImageDeregistrationProtectionInput"}
+ if s.ImageId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImageId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DisableImageDeregistrationProtectionInput) SetDryRun(v bool) *DisableImageDeregistrationProtectionInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetImageId sets the ImageId field's value.
+func (s *DisableImageDeregistrationProtectionInput) SetImageId(v string) *DisableImageDeregistrationProtectionInput {
+ s.ImageId = &v
+ return s
+}
+
+type DisableImageDeregistrationProtectionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, it returns an error.
+ Return *string `locationName:"return" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisableImageDeregistrationProtectionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DisableImageDeregistrationProtectionOutput) GoString() string {
+ return s.String()
+}
+
+// SetReturn sets the Return field's value.
+func (s *DisableImageDeregistrationProtectionOutput) SetReturn(v string) *DisableImageDeregistrationProtectionOutput {
+ s.Return = &v
+ return s
+}
+
type DisableImageInput struct {
_ struct{} `type:"structure"`
@@ -119646,6 +120084,105 @@ func (s *EnableImageDeprecationOutput) SetReturn(v bool) *EnableImageDeprecation
return s
}
+type EnableImageDeregistrationProtectionInput struct {
+ _ struct{} `type:"structure"`
+
+ // Checks whether you have the required permissions for the action, without
+ // actually making the request, and provides an error response. If you have
+ // the required permissions, the error response is DryRunOperation. Otherwise,
+ // it is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the AMI.
+ //
+ // ImageId is a required field
+ ImageId *string `type:"string" required:"true"`
+
+ // If true, enforces deregistration protection for 24 hours after deregistration
+ // protection is disabled.
+ WithCooldown *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableImageDeregistrationProtectionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableImageDeregistrationProtectionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableImageDeregistrationProtectionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "EnableImageDeregistrationProtectionInput"}
+ if s.ImageId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImageId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *EnableImageDeregistrationProtectionInput) SetDryRun(v bool) *EnableImageDeregistrationProtectionInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetImageId sets the ImageId field's value.
+func (s *EnableImageDeregistrationProtectionInput) SetImageId(v string) *EnableImageDeregistrationProtectionInput {
+ s.ImageId = &v
+ return s
+}
+
+// SetWithCooldown sets the WithCooldown field's value.
+func (s *EnableImageDeregistrationProtectionInput) SetWithCooldown(v bool) *EnableImageDeregistrationProtectionInput {
+ s.WithCooldown = &v
+ return s
+}
+
+type EnableImageDeregistrationProtectionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Returns true if the request succeeds; otherwise, it returns an error.
+ Return *string `locationName:"return" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableImageDeregistrationProtectionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableImageDeregistrationProtectionOutput) GoString() string {
+ return s.String()
+}
+
+// SetReturn sets the Return field's value.
+func (s *EnableImageDeregistrationProtectionOutput) SetReturn(v string) *EnableImageDeregistrationProtectionOutput {
+ s.Return = &v
+ return s
+}
+
type EnableImageInput struct {
_ struct{} `type:"structure"`
@@ -126190,6 +126727,155 @@ func (s *GetInstanceMetadataDefaultsOutput) SetAccountLevel(v *InstanceMetadataD
return s
}
+type GetInstanceTpmEkPubInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specify this parameter to verify whether the request will succeed, without
+ // actually making the request. If the request will succeed, the response is
+ // DryRunOperation. Otherwise, the response is UnauthorizedOperation.
+ DryRun *bool `type:"boolean"`
+
+ // The ID of the instance for which to get the public endorsement key.
+ //
+ // InstanceId is a required field
+ InstanceId *string `type:"string" required:"true"`
+
+ // The required public endorsement key format. Specify der for a DER-encoded
+ // public key that is compatible with OpenSSL. Specify tpmt for a TPM 2.0 format
+ // that is compatible with tpm2-tools. The returned key is base64 encoded.
+ //
+ // KeyFormat is a required field
+ KeyFormat *string `type:"string" required:"true" enum:"EkPubKeyFormat"`
+
+ // The required public endorsement key type.
+ //
+ // KeyType is a required field
+ KeyType *string `type:"string" required:"true" enum:"EkPubKeyType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetInstanceTpmEkPubInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetInstanceTpmEkPubInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetInstanceTpmEkPubInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetInstanceTpmEkPubInput"}
+ if s.InstanceId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InstanceId"))
+ }
+ if s.KeyFormat == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyFormat"))
+ }
+ if s.KeyType == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *GetInstanceTpmEkPubInput) SetDryRun(v bool) *GetInstanceTpmEkPubInput {
+ s.DryRun = &v
+ return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *GetInstanceTpmEkPubInput) SetInstanceId(v string) *GetInstanceTpmEkPubInput {
+ s.InstanceId = &v
+ return s
+}
+
+// SetKeyFormat sets the KeyFormat field's value.
+func (s *GetInstanceTpmEkPubInput) SetKeyFormat(v string) *GetInstanceTpmEkPubInput {
+ s.KeyFormat = &v
+ return s
+}
+
+// SetKeyType sets the KeyType field's value.
+func (s *GetInstanceTpmEkPubInput) SetKeyType(v string) *GetInstanceTpmEkPubInput {
+ s.KeyType = &v
+ return s
+}
+
+type GetInstanceTpmEkPubOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the instance.
+ InstanceId *string `locationName:"instanceId" type:"string"`
+
+ // The public endorsement key format.
+ KeyFormat *string `locationName:"keyFormat" type:"string" enum:"EkPubKeyFormat"`
+
+ // The public endorsement key type.
+ KeyType *string `locationName:"keyType" type:"string" enum:"EkPubKeyType"`
+
+ // The public endorsement key material.
+ //
+ // KeyValue is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by GetInstanceTpmEkPubOutput's
+ // String and GoString methods.
+ KeyValue *string `locationName:"keyValue" type:"string" sensitive:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetInstanceTpmEkPubOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetInstanceTpmEkPubOutput) GoString() string {
+ return s.String()
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *GetInstanceTpmEkPubOutput) SetInstanceId(v string) *GetInstanceTpmEkPubOutput {
+ s.InstanceId = &v
+ return s
+}
+
+// SetKeyFormat sets the KeyFormat field's value.
+func (s *GetInstanceTpmEkPubOutput) SetKeyFormat(v string) *GetInstanceTpmEkPubOutput {
+ s.KeyFormat = &v
+ return s
+}
+
+// SetKeyType sets the KeyType field's value.
+func (s *GetInstanceTpmEkPubOutput) SetKeyType(v string) *GetInstanceTpmEkPubOutput {
+ s.KeyType = &v
+ return s
+}
+
+// SetKeyValue sets the KeyValue field's value.
+func (s *GetInstanceTpmEkPubOutput) SetKeyValue(v string) *GetInstanceTpmEkPubOutput {
+ s.KeyValue = &v
+ return s
+}
+
type GetInstanceTypesFromInstanceRequirementsInput struct {
_ struct{} `type:"structure"`
@@ -131795,6 +132481,9 @@ type Image struct {
// the seconds to the nearest minute.
DeprecationTime *string `locationName:"deprecationTime" type:"string"`
+ // Indicates whether deregistration protection is enabled for the AMI.
+ DeregistrationProtection *string `locationName:"deregistrationProtection" type:"string"`
+
// The description of the AMI that was provided during image creation.
Description *string `locationName:"description" type:"string"`
@@ -131828,6 +132517,13 @@ type Image struct {
// images.
KernelId *string `locationName:"kernelId" type:"string"`
+ // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+ // when the AMI was last used to launch an EC2 instance. When the AMI is used
+ // to launch an instance, there is a 24-hour delay before that usage is reported.
+ //
+ // lastLaunchedTime data is available starting April 2017.
+ LastLaunchedTime *string `locationName:"lastLaunchedTime" type:"string"`
+
// The name of the AMI that was provided during image creation.
Name *string `locationName:"name" type:"string"`
@@ -131947,6 +132643,12 @@ func (s *Image) SetDeprecationTime(v string) *Image {
return s
}
+// SetDeregistrationProtection sets the DeregistrationProtection field's value.
+func (s *Image) SetDeregistrationProtection(v string) *Image {
+ s.DeregistrationProtection = &v
+ return s
+}
+
// SetDescription sets the Description field's value.
func (s *Image) SetDescription(v string) *Image {
s.Description = &v
@@ -132001,6 +132703,12 @@ func (s *Image) SetKernelId(v string) *Image {
return s
}
+// SetLastLaunchedTime sets the LastLaunchedTime field's value.
+func (s *Image) SetLastLaunchedTime(v string) *Image {
+ s.LastLaunchedTime = &v
+ return s
+}
+
// SetName sets the Name field's value.
func (s *Image) SetName(v string) *Image {
s.Name = &v
@@ -138635,6 +139343,10 @@ type InstanceTypeInfo struct {
// Indicates whether NitroTPM is supported.
NitroTpmSupport *string `locationName:"nitroTpmSupport" type:"string" enum:"NitroTpmSupport"`
+ // Indicates whether a local Precision Time Protocol (PTP) hardware clock (PHC)
+ // is supported.
+ PhcSupport *string `locationName:"phcSupport" type:"string" enum:"PhcSupport"`
+
// Describes the placement group settings for the instance type.
PlacementGroupInfo *PlacementGroupInfo `locationName:"placementGroupInfo" type:"structure"`
@@ -138808,6 +139520,12 @@ func (s *InstanceTypeInfo) SetNitroTpmSupport(v string) *InstanceTypeInfo {
return s
}
+// SetPhcSupport sets the PhcSupport field's value.
+func (s *InstanceTypeInfo) SetPhcSupport(v string) *InstanceTypeInfo {
+ s.PhcSupport = &v
+ return s
+}
+
// SetPlacementGroupInfo sets the PlacementGroupInfo field's value.
func (s *InstanceTypeInfo) SetPlacementGroupInfo(v *PlacementGroupInfo) *InstanceTypeInfo {
s.PlacementGroupInfo = v
@@ -144097,7 +144815,11 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct {
// A description for the network interface.
Description *string `type:"string"`
- // The device index for the network interface attachment.
+ // The device index for the network interface attachment. Each network interface
+ // requires a device index. If you create a launch template that includes secondary
+ // network interfaces but not a primary network interface, then you must add
+ // a primary network interface as a launch parameter when you launch an instance
+ // from the template.
DeviceIndex *int64 `type:"integer"`
// Configure ENA Express settings for your launch template.
@@ -144823,30 +145545,27 @@ func (s *LaunchTemplatePrivateDnsNameOptionsRequest) SetHostnameType(v string) *
return s
}
-// The launch template to use. You must specify either the launch template ID
-// or launch template name in the request, but not both.
+// Describes the launch template to use.
type LaunchTemplateSpecification struct {
_ struct{} `type:"structure"`
// The ID of the launch template.
//
- // You must specify the LaunchTemplateId or the LaunchTemplateName, but not
- // both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateId *string `type:"string"`
// The name of the launch template.
//
- // You must specify the LaunchTemplateName or the LaunchTemplateId, but not
- // both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateName *string `type:"string"`
// The launch template version number, $Latest, or $Default.
//
- // If the value is $Latest, Amazon EC2 uses the latest version of the launch
- // template.
+ // A value of $Latest uses the latest version of the launch template.
//
- // If the value is $Default, Amazon EC2 uses the default version of the launch
- // template.
+ // A value of $Default uses the default version of the launch template.
//
// Default: The default version of the launch template.
Version *string `type:"string"`
@@ -150096,11 +150815,10 @@ type ModifyInstanceMetadataDefaultsInput struct {
// instance metadata can't be accessed.
HttpEndpoint *string `type:"string" enum:"DefaultInstanceMetadataEndpointState"`
- // The maximum number of hops that the metadata token can travel.
- //
- // Minimum: 1
+ // The maximum number of hops that the metadata token can travel. To indicate
+ // no preference, specify -1.
//
- // Maximum: 64
+ // Possible values: Integers from 1 to 64, and -1 to indicate no preference
HttpPutResponseHopLimit *int64 `type:"integer"`
// Indicates whether IMDSv2 is required.
@@ -151241,14 +151959,14 @@ type ModifyLaunchTemplateInput struct {
// The ID of the launch template.
//
- // You must specify either the LaunchTemplateId or the LaunchTemplateName, but
- // not both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateId *string `type:"string"`
// The name of the launch template.
//
- // You must specify either the LaunchTemplateName or the LaunchTemplateId, but
- // not both.
+ // You must specify either the launch template ID or the launch template name,
+ // but not both.
LaunchTemplateName *string `min:"3" type:"string"`
}
@@ -151636,6 +152354,11 @@ func (s *ModifyManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *Mod
type ModifyNetworkInterfaceAttributeInput struct {
_ struct{} `type:"structure"`
+ // Indicates whether to assign a public IPv4 address to a network interface.
+ // This option can be enabled for any network interface but will only apply
+ // to the primary network interface (eth0).
+ AssociatePublicIpAddress *bool `type:"boolean"`
+
// Information about the interface attachment. If modifying the delete on termination
// attribute, you must specify the ID of the interface attachment.
Attachment *NetworkInterfaceAttachmentChanges `locationName:"attachment" type:"structure"`
@@ -151722,6 +152445,12 @@ func (s *ModifyNetworkInterfaceAttributeInput) Validate() error {
return nil
}
+// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value.
+func (s *ModifyNetworkInterfaceAttributeInput) SetAssociatePublicIpAddress(v bool) *ModifyNetworkInterfaceAttributeInput {
+ s.AssociatePublicIpAddress = &v
+ return s
+}
+
// SetAttachment sets the Attachment field's value.
func (s *ModifyNetworkInterfaceAttributeInput) SetAttachment(v *NetworkInterfaceAttachmentChanges) *ModifyNetworkInterfaceAttributeInput {
s.Attachment = v
@@ -167250,8 +167979,7 @@ type RequestLaunchTemplateData struct {
// The monitoring for the instance.
Monitoring *LaunchTemplatesMonitoringRequest `type:"structure"`
- // One or more network interfaces. If you specify a network interface, you must
- // specify any security groups and subnets as part of the network interface.
+ // The network interfaces for the instance.
NetworkInterfaces []*LaunchTemplateInstanceNetworkInterfaceSpecificationRequest `locationName:"NetworkInterface" locationNameList:"InstanceNetworkInterfaceSpecification" type:"list"`
// The placement for the instance.
@@ -167268,12 +167996,17 @@ type RequestLaunchTemplateData struct {
// in the Amazon Elastic Compute Cloud User Guide.
RamDiskId *string `type:"string"`
- // One or more security group IDs. You can create a security group using CreateSecurityGroup
- // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html).
+ // The IDs of the security groups.
+ //
+ // If you specify a network interface, you must specify any security groups
+ // as part of the network interface instead of using this parameter.
SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
- // One or more security group names. For a nondefault VPC, you must use security
+ // The names of the security groups. For a nondefault VPC, you must use security
// group IDs instead.
+ //
+ // If you specify a network interface, you must specify any security groups
+ // as part of the network interface instead of using this parameter.
SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"`
// The tags to apply to the resources that are created during instance launch.
@@ -171920,26 +172653,15 @@ type RunInstancesInput struct {
// Default: false
EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`
- // Deprecated.
+ // An elastic GPU to associate with the instance.
//
- // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads
- // that require graphics acceleration, we recommend that you use Amazon EC2
- // G4ad, G4dn, or G5 instances.
+ // Amazon Elastic Graphics reached end of life on January 8, 2024.
ElasticGpuSpecification []*ElasticGpuSpecification `locationNameList:"item" type:"list"`
- // An elastic inference accelerator to associate with the instance. Elastic
- // inference accelerators are a resource you can attach to your Amazon EC2 instances
- // to accelerate your Deep Learning (DL) inference workloads.
+ // An elastic inference accelerator to associate with the instance.
//
- // You cannot specify accelerators from different generations in the same request.
- //
- // Starting April 15, 2023, Amazon Web Services will not onboard new customers
- // to Amazon Elastic Inference (EI), and will help current customers migrate
- // their workloads to options that offer better price and performance. After
- // April 15, 2023, new customers will not be able to launch instances with Amazon
- // EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However,
- // customers who have used Amazon EI at least once during the past 30-day period
- // are considered current customers and will be able to continue using the service.
+ // Amazon Elastic Inference (EI) is no longer available to new customers. For
+ // more information, see Amazon Elastic Inference FAQs (http://aws.amazon.com/machine-learning/elastic-inference/faqs/).
ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"`
// If you’re launching an instance into a dual-stack or IPv6-only subnet,
@@ -172030,9 +172752,8 @@ type RunInstancesInput struct {
// you choose an AMI that is configured to allow users another way to log in.
KeyName *string `type:"string"`
- // The launch template to use to launch the instances. Any parameters that you
- // specify in RunInstances override the same parameters in the launch template.
- // You can specify either the name or ID of a launch template, but not both.
+ // The launch template. Any additional parameters that you specify for the new
+ // instance overwrite the corresponding parameters included in the launch template.
LaunchTemplate *LaunchTemplateSpecification `type:"structure"`
// The license configurations.
@@ -172072,9 +172793,7 @@ type RunInstancesInput struct {
// Specifies whether detailed monitoring is enabled for the instance.
Monitoring *RunInstancesMonitoringEnabled `type:"structure"`
- // The network interfaces to associate with the instance. If you specify a network
- // interface, you must specify any security groups and subnets as part of the
- // network interface.
+ // The network interfaces to associate with the instance.
NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"`
// The placement for the instance.
@@ -172111,13 +172830,13 @@ type RunInstancesInput struct {
// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html).
//
// If you specify a network interface, you must specify any security groups
- // as part of the network interface.
+ // as part of the network interface instead of using this parameter.
SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
// [Default VPC] The names of the security groups.
//
// If you specify a network interface, you must specify any security groups
- // as part of the network interface.
+ // as part of the network interface instead of using this parameter.
//
// Default: Amazon EC2 uses the default security group.
SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"`
@@ -172125,7 +172844,7 @@ type RunInstancesInput struct {
// The ID of the subnet to launch the instance into.
//
// If you specify a network interface, you must specify any subnets as part
- // of the network interface.
+ // of the network interface instead of using this parameter.
SubnetId *string `type:"string"`
// The tags to apply to the resources that are created during instance launch.
@@ -174219,7 +174938,8 @@ type SearchTransitGatewayRoutesInput struct {
// Filters is a required field
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list" required:"true"`
- // The maximum number of routes to return.
+ // The maximum number of routes to return. If a value is not provided, the default
+ // is 1000.
MaxResults *int64 `min:"5" type:"integer"`
// The ID of the transit gateway route table.
@@ -176549,11 +177269,11 @@ type SpotFleetLaunchSpecification struct {
// Enable or disable monitoring for the instances.
Monitoring *SpotFleetMonitoring `locationName:"monitoring" type:"structure"`
- // One or more network interfaces. If you specify a network interface, you must
- // specify subnet IDs and security group IDs using the network interface.
+ // The network interfaces.
//
- // SpotFleetLaunchSpecification currently does not support Elastic Fabric Adapter
- // (EFA). To specify an EFA, you must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html).
+ // SpotFleetLaunchSpecification does not support Elastic Fabric Adapter (EFA).
+ // You must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html)
+ // instead.
NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`
// The placement information.
@@ -176566,6 +177286,9 @@ type SpotFleetLaunchSpecification struct {
RamdiskId *string `locationName:"ramdiskId" type:"string"`
// The security groups.
+ //
+ // If you specify a network interface, you must specify any security groups
+ // as part of the network interface instead of using this parameter.
SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
// The maximum price per unit hour that you are willing to pay for a Spot Instance.
@@ -176580,6 +177303,9 @@ type SpotFleetLaunchSpecification struct {
// The IDs of the subnets in which to launch the instances. To specify multiple
// subnets, separate them using commas; for example, "subnet-1234abcdeexample1,
// subnet-0987cdef6example2".
+ //
+ // If you specify a network interface, you must specify any subnets as part
+ // of the network interface instead of using this parameter.
SubnetId *string `locationName:"subnetId" type:"string"`
// The tags to apply during creation.
@@ -187619,6 +188345,9 @@ func (s *VgwTelemetry) SetStatusMessage(v string) *VgwTelemetry {
type Volume struct {
_ struct{} `type:"structure"`
+ //
+ // This parameter is not returned by CreateVolume.
+ //
// Information about the volume attachments.
Attachments []*VolumeAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"`
@@ -187631,6 +188360,9 @@ type Volume struct {
// Indicates whether the volume is encrypted.
Encrypted *bool `locationName:"encrypted" type:"boolean"`
+ //
+ // This parameter is not returned by CreateVolume.
+ //
// Indicates whether the volume was created using fast snapshot restore.
FastRestored *bool `locationName:"fastRestored" type:"boolean"`
@@ -187656,6 +188388,9 @@ type Volume struct {
// The snapshot from which the volume was created, if applicable.
SnapshotId *string `locationName:"snapshotId" type:"string"`
+ //
+ // This parameter is not returned by CreateVolume.
+ //
// Reserved for future use.
SseType *string `locationName:"sseType" type:"string" enum:"SSEType"`
@@ -192285,6 +193020,38 @@ func Ec2InstanceConnectEndpointState_Values() []string {
}
}
+const (
+ // EkPubKeyFormatDer is a EkPubKeyFormat enum value
+ EkPubKeyFormatDer = "der"
+
+ // EkPubKeyFormatTpmt is a EkPubKeyFormat enum value
+ EkPubKeyFormatTpmt = "tpmt"
+)
+
+// EkPubKeyFormat_Values returns all elements of the EkPubKeyFormat enum
+func EkPubKeyFormat_Values() []string {
+ return []string{
+ EkPubKeyFormatDer,
+ EkPubKeyFormatTpmt,
+ }
+}
+
+const (
+ // EkPubKeyTypeRsa2048 is a EkPubKeyType enum value
+ EkPubKeyTypeRsa2048 = "rsa-2048"
+
+ // EkPubKeyTypeEccSecP384 is a EkPubKeyType enum value
+ EkPubKeyTypeEccSecP384 = "ecc-sec-p384"
+)
+
+// EkPubKeyType_Values returns all elements of the EkPubKeyType enum
+func EkPubKeyType_Values() []string {
+ return []string{
+ EkPubKeyTypeRsa2048,
+ EkPubKeyTypeEccSecP384,
+ }
+}
+
const (
// ElasticGpuStateAttached is a ElasticGpuState enum value
ElasticGpuStateAttached = "ATTACHED"
@@ -193041,6 +193808,9 @@ const (
// ImageAttributeNameImdsSupport is a ImageAttributeName enum value
ImageAttributeNameImdsSupport = "imdsSupport"
+
+ // ImageAttributeNameDeregistrationProtection is a ImageAttributeName enum value
+ ImageAttributeNameDeregistrationProtection = "deregistrationProtection"
)
// ImageAttributeName_Values returns all elements of the ImageAttributeName enum
@@ -193058,6 +193828,7 @@ func ImageAttributeName_Values() []string {
ImageAttributeNameUefiData,
ImageAttributeNameLastLaunchedTime,
ImageAttributeNameImdsSupport,
+ ImageAttributeNameDeregistrationProtection,
}
}
@@ -198045,6 +198816,9 @@ const (
// NetworkInterfaceAttributeAttachment is a NetworkInterfaceAttribute enum value
NetworkInterfaceAttributeAttachment = "attachment"
+
+ // NetworkInterfaceAttributeAssociatePublicIpAddress is a NetworkInterfaceAttribute enum value
+ NetworkInterfaceAttributeAssociatePublicIpAddress = "associatePublicIpAddress"
)
// NetworkInterfaceAttribute_Values returns all elements of the NetworkInterfaceAttribute enum
@@ -198054,6 +198828,7 @@ func NetworkInterfaceAttribute_Values() []string {
NetworkInterfaceAttributeGroupSet,
NetworkInterfaceAttributeSourceDestCheck,
NetworkInterfaceAttributeAttachment,
+ NetworkInterfaceAttributeAssociatePublicIpAddress,
}
}
@@ -198417,6 +199192,22 @@ func PermissionGroup_Values() []string {
}
}
+const (
+ // PhcSupportUnsupported is a PhcSupport enum value
+ PhcSupportUnsupported = "unsupported"
+
+ // PhcSupportSupported is a PhcSupport enum value
+ PhcSupportSupported = "supported"
+)
+
+// PhcSupport_Values returns all elements of the PhcSupport enum
+func PhcSupport_Values() []string {
+ return []string{
+ PhcSupportUnsupported,
+ PhcSupportSupported,
+ }
+}
+
const (
// PlacementGroupStatePending is a PlacementGroupState enum value
PlacementGroupStatePending = "pending"
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
index 4be7c47860..5a1c447647 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
@@ -1934,6 +1934,10 @@ type EC2API interface {
DisableImageDeprecationWithContext(aws.Context, *ec2.DisableImageDeprecationInput, ...request.Option) (*ec2.DisableImageDeprecationOutput, error)
DisableImageDeprecationRequest(*ec2.DisableImageDeprecationInput) (*request.Request, *ec2.DisableImageDeprecationOutput)
+ DisableImageDeregistrationProtection(*ec2.DisableImageDeregistrationProtectionInput) (*ec2.DisableImageDeregistrationProtectionOutput, error)
+ DisableImageDeregistrationProtectionWithContext(aws.Context, *ec2.DisableImageDeregistrationProtectionInput, ...request.Option) (*ec2.DisableImageDeregistrationProtectionOutput, error)
+ DisableImageDeregistrationProtectionRequest(*ec2.DisableImageDeregistrationProtectionInput) (*request.Request, *ec2.DisableImageDeregistrationProtectionOutput)
+
DisableIpamOrganizationAdminAccount(*ec2.DisableIpamOrganizationAdminAccountInput) (*ec2.DisableIpamOrganizationAdminAccountOutput, error)
DisableIpamOrganizationAdminAccountWithContext(aws.Context, *ec2.DisableIpamOrganizationAdminAccountInput, ...request.Option) (*ec2.DisableIpamOrganizationAdminAccountOutput, error)
DisableIpamOrganizationAdminAccountRequest(*ec2.DisableIpamOrganizationAdminAccountInput) (*request.Request, *ec2.DisableIpamOrganizationAdminAccountOutput)
@@ -2054,6 +2058,10 @@ type EC2API interface {
EnableImageDeprecationWithContext(aws.Context, *ec2.EnableImageDeprecationInput, ...request.Option) (*ec2.EnableImageDeprecationOutput, error)
EnableImageDeprecationRequest(*ec2.EnableImageDeprecationInput) (*request.Request, *ec2.EnableImageDeprecationOutput)
+ EnableImageDeregistrationProtection(*ec2.EnableImageDeregistrationProtectionInput) (*ec2.EnableImageDeregistrationProtectionOutput, error)
+ EnableImageDeregistrationProtectionWithContext(aws.Context, *ec2.EnableImageDeregistrationProtectionInput, ...request.Option) (*ec2.EnableImageDeregistrationProtectionOutput, error)
+ EnableImageDeregistrationProtectionRequest(*ec2.EnableImageDeregistrationProtectionInput) (*request.Request, *ec2.EnableImageDeregistrationProtectionOutput)
+
EnableIpamOrganizationAdminAccount(*ec2.EnableIpamOrganizationAdminAccountInput) (*ec2.EnableIpamOrganizationAdminAccountOutput, error)
EnableIpamOrganizationAdminAccountWithContext(aws.Context, *ec2.EnableIpamOrganizationAdminAccountInput, ...request.Option) (*ec2.EnableIpamOrganizationAdminAccountOutput, error)
EnableIpamOrganizationAdminAccountRequest(*ec2.EnableIpamOrganizationAdminAccountInput) (*request.Request, *ec2.EnableIpamOrganizationAdminAccountOutput)
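// Illustrative sketch, not from the vendored SDK: enabling the new AMI
// deregistration protection through the client methods declared above. The input
// struct fields (ImageId, WithCooldown) and the AMI ID are assumptions for
// illustration; the input type itself is defined elsewhere in this diff.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	_, err := svc.EnableImageDeregistrationProtection(&ec2.EnableImageDeregistrationProtectionInput{
		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder AMI ID
		WithCooldown: aws.Bool(true),                      // assumed optional cooldown field
	})
	if err != nil {
		log.Fatal(err)
	}
}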
@@ -2175,6 +2183,10 @@ type EC2API interface {
GetInstanceMetadataDefaultsWithContext(aws.Context, *ec2.GetInstanceMetadataDefaultsInput, ...request.Option) (*ec2.GetInstanceMetadataDefaultsOutput, error)
GetInstanceMetadataDefaultsRequest(*ec2.GetInstanceMetadataDefaultsInput) (*request.Request, *ec2.GetInstanceMetadataDefaultsOutput)
+ GetInstanceTpmEkPub(*ec2.GetInstanceTpmEkPubInput) (*ec2.GetInstanceTpmEkPubOutput, error)
+ GetInstanceTpmEkPubWithContext(aws.Context, *ec2.GetInstanceTpmEkPubInput, ...request.Option) (*ec2.GetInstanceTpmEkPubOutput, error)
+ GetInstanceTpmEkPubRequest(*ec2.GetInstanceTpmEkPubInput) (*request.Request, *ec2.GetInstanceTpmEkPubOutput)
+
GetInstanceTypesFromInstanceRequirements(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error)
GetInstanceTypesFromInstanceRequirementsWithContext(aws.Context, *ec2.GetInstanceTypesFromInstanceRequirementsInput, ...request.Option) (*ec2.GetInstanceTypesFromInstanceRequirementsOutput, error)
GetInstanceTypesFromInstanceRequirementsRequest(*ec2.GetInstanceTypesFromInstanceRequirementsInput) (*request.Request, *ec2.GetInstanceTypesFromInstanceRequirementsOutput)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go
index cea1cf428b..c4c13e83a3 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go
@@ -18889,7 +18889,7 @@ type Blueprint struct {
// This parameter only applies to Lightsail for Research resources.
AppCategory *string `locationName:"appCategory" type:"string" enum:"AppCategory"`
- // The ID for the virtual private server image (app_wordpress_4_4 or app_lamp_7_0).
+ // The ID for the virtual private server image (app_wordpress_x_x or app_lamp_x_x).
BlueprintId *string `locationName:"blueprintId" type:"string"`
// The description of the blueprint.
@@ -19452,7 +19452,7 @@ func (s *BucketState) SetMessage(v string) *BucketState {
type Bundle struct {
_ struct{} `type:"structure"`
- // The bundle ID (micro_1_0).
+ // The bundle ID (micro_x_x).
BundleId *string `locationName:"bundleId" type:"string"`
// The number of vCPUs included in the bundle (2).
@@ -19461,7 +19461,7 @@ type Bundle struct {
// The size of the SSD (30).
DiskSizeInGb *int64 `locationName:"diskSizeInGb" type:"integer"`
- // The Amazon EC2 instance type (t2.micro).
+ // The instance type (micro).
InstanceType *string `locationName:"instanceType" type:"string"`
// A Boolean value indicating whether the bundle is active.
@@ -23902,7 +23902,7 @@ type CreateInstancesFromSnapshotInput struct {
AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
// The bundle of specification information for your virtual private server (or
- // instance), including the pricing plan (micro_1_0).
+ // instance), including the pricing plan (micro_x_x).
//
// BundleId is a required field
BundleId *string `locationName:"bundleId" type:"string" required:"true"`
@@ -23925,7 +23925,8 @@ type CreateInstancesFromSnapshotInput struct {
// The IP address type for the instance.
//
- // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack
+ // for IPv4 and IPv6.
//
// The default value is dualstack.
IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
@@ -24166,7 +24167,7 @@ type CreateInstancesInput struct {
// AvailabilityZone is a required field
AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"`
- // The ID for a virtual private server image (app_wordpress_4_4 or app_lamp_7_0).
+ // The ID for a virtual private server image (app_wordpress_x_x or app_lamp_x_x).
// Use the get blueprints operation to return a list of available images (or
// blueprints).
//
@@ -24179,7 +24180,7 @@ type CreateInstancesInput struct {
BlueprintId *string `locationName:"blueprintId" type:"string" required:"true"`
// The bundle of specification information for your virtual private server (or
- // instance), including the pricing plan (micro_1_0).
+ // instance), including the pricing plan (medium_x_x).
//
// BundleId is a required field
BundleId *string `locationName:"bundleId" type:"string" required:"true"`
@@ -24200,7 +24201,8 @@ type CreateInstancesInput struct {
// The IP address type for the instance.
//
- // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack
+ // for IPv4 and IPv6.
//
// The default value is dualstack.
IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
@@ -24519,7 +24521,8 @@ type CreateLoadBalancerInput struct {
// The IP address type for the load balancer.
//
- // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack
+ // for IPv4 and IPv6.
//
// The default value is dualstack.
IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
@@ -36212,13 +36215,13 @@ type Instance struct {
// The Amazon Resource Name (ARN) of the instance (arn:aws:lightsail:us-east-2:123456789101:Instance/244ad76f-8aad-4741-809f-12345EXAMPLE).
Arn *string `locationName:"arn" type:"string"`
- // The blueprint ID (os_amlinux_2016_03).
+ // The blueprint ID (amazon_linux_2023).
BlueprintId *string `locationName:"blueprintId" type:"string"`
- // The friendly name of the blueprint (Amazon Linux).
+ // The friendly name of the blueprint (Amazon Linux 2023).
BlueprintName *string `locationName:"blueprintName" type:"string"`
- // The bundle for the instance (micro_1_0).
+ // The bundle for the instance (micro_x_x).
BundleId *string `locationName:"bundleId" type:"string"`
// The timestamp when the instance was created (1479734909.17) in Unix time
@@ -36230,7 +36233,8 @@ type Instance struct {
// The IP address type of the instance.
//
- // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack
+ // for IPv4 and IPv6.
IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
// The IPv6 addresses of the instance.
@@ -36246,7 +36250,7 @@ type Instance struct {
// The metadata options for the Amazon Lightsail instance.
MetadataOptions *InstanceMetadataOptions `locationName:"metadataOptions" type:"structure"`
- // The name the user gave the instance (Amazon_Linux-1GB-Ohio-1).
+ // The name the user gave the instance (Amazon_Linux_2023-1).
Name *string `locationName:"name" type:"string"`
// Information about the public ports and monthly data transfer rates for the
@@ -37085,6 +37089,10 @@ type InstancePortInfo struct {
// an instance could not be reached. When you specify icmp as the protocol,
// you must specify the ICMP type using the fromPort parameter, and ICMP
// code using the toPort parameter.
+ //
+ // * icmpv6 - Internet Control Message Protocol (ICMP) for IPv6. When you
+ // specify icmpv6 as the protocol, you must specify the ICMP type using the
+ // fromPort parameter, and ICMP code using the toPort parameter.
Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"`
// The last port in a range of open ports on an instance.
@@ -37261,6 +37269,10 @@ type InstancePortState struct {
// an instance could not be reached. When you specify icmp as the protocol,
// you must specify the ICMP type using the fromPort parameter, and ICMP
// code using the toPort parameter.
+ //
+ // * icmpv6 - Internet Control Message Protocol (ICMP) for IPv6. When you
+ // specify icmpv6 as the protocol, you must specify the ICMP type using the
+ // fromPort parameter, and ICMP code using the toPort parameter.
Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"`
// Specifies whether the instance port is open or closed.
@@ -37358,12 +37370,12 @@ type InstanceSnapshot struct {
// An array of disk objects containing information about all block storage disks.
FromAttachedDisks []*Disk `locationName:"fromAttachedDisks" type:"list"`
- // The blueprint ID from which you created the snapshot (os_debian_8_3). A blueprint
- // is a virtual private server (or instance) image used to create instances
- // quickly.
+ // The blueprint ID from which you created the snapshot (amazon_linux_2023).
+ // A blueprint is a virtual private server (or instance) image used to create
+ // instances quickly.
FromBlueprintId *string `locationName:"fromBlueprintId" type:"string"`
- // The bundle ID from which you created the snapshot (micro_1_0).
+ // The bundle ID from which you created the snapshot (micro_x_x).
FromBundleId *string `locationName:"fromBundleId" type:"string"`
// The Amazon Resource Name (ARN) of the instance from which the snapshot was
@@ -37525,10 +37537,10 @@ func (s *InstanceSnapshot) SetTags(v []*Tag) *InstanceSnapshot {
type InstanceSnapshotInfo struct {
_ struct{} `type:"structure"`
- // The blueprint ID from which the source instance (os_debian_8_3).
+ // The blueprint ID from which the source instance was created (amazon_linux_2023).
FromBlueprintId *string `locationName:"fromBlueprintId" type:"string"`
- // The bundle ID from which the source instance was created (micro_1_0).
+ // The bundle ID from which the source instance was created (micro_x_x).
FromBundleId *string `locationName:"fromBundleId" type:"string"`
// A list of objects describing the disks that were attached to the source instance.
@@ -38103,7 +38115,8 @@ type LoadBalancer struct {
// The IP address type of the load balancer.
//
- // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack
+ // for IPv4 and IPv6.
IpAddressType *string `locationName:"ipAddressType" type:"string" enum:"IpAddressType"`
// The AWS Region where your load balancer was created (us-east-2a). Lightsail
@@ -39987,6 +40000,10 @@ type PortInfo struct {
// an instance could not be reached. When you specify icmp as the protocol,
// you must specify the ICMP type using the fromPort parameter, and ICMP
// code using the toPort parameter.
+ //
+ // * icmpv6 - Internet Control Message Protocol (ICMP) for IPv6. When you
+ // specify icmpv6 as the protocol, you must specify the ICMP type using the
+ // fromPort parameter, and ICMP code using the toPort parameter.
Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"`
// The last port in a range of open ports on an instance.
@@ -42604,9 +42621,21 @@ func (s *Session) SetUrl(v string) *Session {
type SetIpAddressTypeInput struct {
_ struct{} `type:"structure"`
+ // Required parameter to accept the instance bundle update when changing to,
+ // and from, IPv6-only.
+ //
+ // An instance bundle will change when switching from dual-stack or ipv4, to
+ // ipv6. It also changes when switching from ipv6, to dual-stack or ipv4.
+ //
+ // You must include this parameter in the command to update the bundle. For
+ // example, if you switch from dual-stack to ipv6, the bundle will be updated,
+ // and billing for the IPv6-only instance bundle begins immediately.
+ AcceptBundleUpdate *bool `locationName:"acceptBundleUpdate" type:"boolean"`
+
// The IP address type to set for the specified resource.
//
- // The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6.
+ // The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack
+ // for IPv4 and IPv6.
//
// IpAddressType is a required field
IpAddressType *string `locationName:"ipAddressType" type:"string" required:"true" enum:"IpAddressType"`
@@ -42665,6 +42694,12 @@ func (s *SetIpAddressTypeInput) Validate() error {
return nil
}
+// SetAcceptBundleUpdate sets the AcceptBundleUpdate field's value.
+func (s *SetIpAddressTypeInput) SetAcceptBundleUpdate(v bool) *SetIpAddressTypeInput {
+ s.AcceptBundleUpdate = &v
+ return s
+}
+
// SetIpAddressType sets the IpAddressType field's value.
func (s *SetIpAddressTypeInput) SetIpAddressType(v string) *SetIpAddressTypeInput {
s.IpAddressType = &v
@@ -46783,6 +46818,9 @@ const (
// IpAddressTypeIpv4 is a IpAddressType enum value
IpAddressTypeIpv4 = "ipv4"
+
+ // IpAddressTypeIpv6 is a IpAddressType enum value
+ IpAddressTypeIpv6 = "ipv6"
)
// IpAddressType_Values returns all elements of the IpAddressType enum
@@ -46790,6 +46828,7 @@ func IpAddressType_Values() []string {
return []string{
IpAddressTypeDualstack,
IpAddressTypeIpv4,
+ IpAddressTypeIpv6,
}
}
@@ -47389,6 +47428,9 @@ const (
// NetworkProtocolIcmp is a NetworkProtocol enum value
NetworkProtocolIcmp = "icmp"
+
+ // NetworkProtocolIcmpv6 is a NetworkProtocol enum value
+ NetworkProtocolIcmpv6 = "icmpv6"
)
// NetworkProtocol_Values returns all elements of the NetworkProtocol enum
@@ -47398,6 +47440,7 @@ func NetworkProtocol_Values() []string {
NetworkProtocolAll,
NetworkProtocolUdp,
NetworkProtocolIcmp,
+ NetworkProtocolIcmpv6,
}
}
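// Illustrative sketch, not from the vendored SDK: switching a Lightsail resource
// to the new "ipv6" address type, which also requires the AcceptBundleUpdate flag
// added above. ResourceName/ResourceType are assumed pre-existing fields of
// SetIpAddressTypeInput; the resource name is a placeholder.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	input := &lightsail.SetIpAddressTypeInput{}
	input.SetResourceName("my-instance") // placeholder resource name
	input.SetResourceType(lightsail.ResourceTypeInstance)
	input.SetIpAddressType(lightsail.IpAddressTypeIpv6) // new enum value from this diff
	input.SetAcceptBundleUpdate(true)                   // new field: accept the IPv6-only bundle change

	if _, err := svc.SetIpAddressType(input); err != nil {
		log.Fatal(err)
	}
}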
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
index 04f6c811b6..827bd51942 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
@@ -179,8 +179,8 @@ func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req
//
// Creates and returns access and refresh tokens for clients and applications
// that are authenticated using IAM entities. The access token can be used to
-// fetch short-term credentials for the assigned AWS accounts or to access application
-// APIs using bearer authentication.
+// fetch short-term credentials for the assigned Amazon Web Services accounts
+// or to access application APIs using bearer authentication.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -331,6 +331,13 @@ func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *reques
// Indicates that an error from the service occurred while trying to process
// a request.
//
+// - InvalidRedirectUriException
+// Indicates that one or more redirect URIs in the request are not supported for
+// this operation.
+//
+// - UnsupportedGrantTypeException
+// Indicates that the grant type in the request is not supported by the service.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient
func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) {
req, out := c.RegisterClientRequest(input)
@@ -619,6 +626,15 @@ type CreateTokenInput struct {
// type is currently unsupported for the CreateToken API.
Code *string `locationName:"code" type:"string"`
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ //
+ // CodeVerifier is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenInput's
+ // String and GoString methods.
+ CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"`
+
// Used only when calling this API for the Device Code grant type. This short-term
// code is used to identify this authorization request. This comes from the
// result of the StartDeviceAuthorization API.
@@ -718,6 +734,12 @@ func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput {
return s
}
+// SetCodeVerifier sets the CodeVerifier field's value.
+func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput {
+ s.CodeVerifier = &v
+ return s
+}
+
// SetDeviceCode sets the DeviceCode field's value.
func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput {
s.DeviceCode = &v
@@ -751,7 +773,8 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput {
type CreateTokenOutput struct {
_ struct{} `type:"structure"`
- // A bearer token to access AWS accounts and applications assigned to a user.
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CreateTokenOutput's
@@ -863,6 +886,15 @@ type CreateTokenWithIAMInput struct {
// persisted in the Authorization Code GrantOptions for the application.
Code *string `locationName:"code" type:"string"`
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ //
+ // CodeVerifier is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
+ // String and GoString methods.
+ CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"`
+
// Supports the following OAuth grant types: Authorization Code, Refresh Token,
// JWT Bearer, and Token Exchange. Specify one of the following values, depending
// on the grant type that you want:
@@ -982,6 +1014,12 @@ func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput {
return s
}
+// SetCodeVerifier sets the CodeVerifier field's value.
+func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput {
+ s.CodeVerifier = &v
+ return s
+}
+
// SetGrantType sets the GrantType field's value.
func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput {
s.GrantType = &v
@@ -1027,7 +1065,8 @@ func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWith
type CreateTokenWithIAMOutput struct {
_ struct{} `type:"structure"`
- // A bearer token to access AWS accounts and applications assigned to a user.
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
@@ -1495,6 +1534,78 @@ func (s *InvalidGrantException) RequestID() string {
return s.RespMetadata.RequestID
}
+// Indicates that one or more redirect URIs in the request are not supported for
+// this operation.
+type InvalidRedirectUriException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_redirect_uri.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRedirectUriException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRedirectUriException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error {
+ return &InvalidRedirectUriException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRedirectUriException) Code() string {
+ return "InvalidRedirectUriException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRedirectUriException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRedirectUriException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRedirectUriException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidRedirectUriException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidRedirectUriException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
// Indicates that something is wrong with the input to the request. For example,
// a required parameter might be missing or out of range.
type InvalidRequestException struct {
@@ -1731,6 +1842,25 @@ type RegisterClientInput struct {
// ClientType is a required field
ClientType *string `locationName:"clientType" type:"string" required:"true"`
+ // This IAM Identity Center application ARN is used to define administrator-managed
+ // configuration for public client access to resources. At authorization, the
+ // scopes, grants, and redirect URI available to this client will be restricted
+ // by this application resource.
+ EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"`
+
+ // The list of OAuth 2.0 grant types that are defined by the client. This list
+ // is used to restrict the token granting flows available to the client.
+ GrantTypes []*string `locationName:"grantTypes" type:"list"`
+
+ // The IAM Identity Center Issuer URL associated with an instance of IAM Identity
+ // Center. This value is needed for user access to resources through the client.
+ IssuerUrl *string `locationName:"issuerUrl" type:"string"`
+
+ // The list of redirect URIs that are defined by the client. At completion of
+ // authorization, this list is used to restrict what locations the user agent
+ // can be redirected back to.
+ RedirectUris []*string `locationName:"redirectUris" type:"list"`
+
// The list of scopes that are defined by the client. Upon authorization, this
// list is used to restrict permissions when granting an access token.
Scopes []*string `locationName:"scopes" type:"list"`
@@ -1782,6 +1912,30 @@ func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput {
return s
}
+// SetEntitledApplicationArn sets the EntitledApplicationArn field's value.
+func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput {
+ s.EntitledApplicationArn = &v
+ return s
+}
+
+// SetGrantTypes sets the GrantTypes field's value.
+func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput {
+ s.GrantTypes = v
+ return s
+}
+
+// SetIssuerUrl sets the IssuerUrl field's value.
+func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput {
+ s.IssuerUrl = &v
+ return s
+}
+
+// SetRedirectUris sets the RedirectUris field's value.
+func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput {
+ s.RedirectUris = v
+ return s
+}
+
// SetScopes sets the Scopes field's value.
func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput {
s.Scopes = v
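// Illustrative sketch, not from the vendored SDK: registering a public OIDC client
// with the new RegisterClientInput fields above. ClientName and ClientType are
// assumed pre-existing required fields; the URLs and grant types are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	svc := ssooidc.New(session.Must(session.NewSession()))

	out, err := svc.RegisterClient(&ssooidc.RegisterClientInput{
		ClientName:   aws.String("my-cli"), // placeholder
		ClientType:   aws.String("public"),
		GrantTypes:   aws.StringSlice([]string{"authorization_code", "refresh_token"}),
		RedirectUris: aws.StringSlice([]string{"http://127.0.0.1:8080/callback"}),          // placeholder
		IssuerUrl:    aws.String("https://identitycenter.amazonaws.com/ssoins-example"),    // placeholder
	})
	if err != nil {
		// A mismatched redirect URI can now surface as InvalidRedirectUriException
		// (see the errors.go change below).
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ClientId))
}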
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
index e6242e4928..cadf4584d2 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
@@ -57,6 +57,13 @@ const (
// makes a CreateToken request with an invalid grant type.
ErrCodeInvalidGrantException = "InvalidGrantException"
+ // ErrCodeInvalidRedirectUriException for service response error code
+ // "InvalidRedirectUriException".
+ //
+ // Indicates that one or more redirect URIs in the request are not supported for
+ // this operation.
+ ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException"
+
// ErrCodeInvalidRequestException for service response error code
// "InvalidRequestException".
//
@@ -106,6 +113,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
"InvalidClientException": newErrorInvalidClientException,
"InvalidClientMetadataException": newErrorInvalidClientMetadataException,
"InvalidGrantException": newErrorInvalidGrantException,
+ "InvalidRedirectUriException": newErrorInvalidRedirectUriException,
"InvalidRequestException": newErrorInvalidRequestException,
"InvalidRequestRegionException": newErrorInvalidRequestRegionException,
"InvalidScopeException": newErrorInvalidScopeException,
diff --git a/vendor/github.com/benbjohnson/clock/LICENSE b/vendor/github.com/benbjohnson/clock/LICENSE
new file mode 100644
index 0000000000..ce212cb1ce
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/benbjohnson/clock/README.md b/vendor/github.com/benbjohnson/clock/README.md
new file mode 100644
index 0000000000..4f1f82fc6d
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/README.md
@@ -0,0 +1,105 @@
+clock
+=====
+
+[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/benbjohnson/clock)
+
+Clock is a small library for mocking time in Go. It provides an interface
+around the standard library's [`time`][time] package so that the application
+can use the realtime clock while tests can use the mock clock.
+
+The module is currently maintained by @djmitche.
+
+[time]: https://pkg.go.dev/github.com/benbjohnson/clock
+
+## Usage
+
+### Realtime Clock
+
+Your application can maintain a `Clock` variable that will allow realtime and
+mock clocks to be interchangeable. For example, if you had an `Application` type:
+
+```go
+import "github.com/benbjohnson/clock"
+
+type Application struct {
+ Clock clock.Clock
+}
+```
+
+You could initialize it to use the realtime clock like this:
+
+```go
+var app Application
+app.Clock = clock.New()
+...
+```
+
+Then all timers and time-related functionality should be performed from the
+`Clock` variable.
+
+
+### Mocking time
+
+In your tests, you will want to use a `Mock` clock:
+
+```go
+import (
+ "testing"
+
+ "github.com/benbjohnson/clock"
+)
+
+func TestApplication_DoSomething(t *testing.T) {
+ mock := clock.NewMock()
+ app := Application{Clock: mock}
+ ...
+}
+```
+
+Now that you've initialized your application to use the mock clock, you can
+adjust the time programmatically. The mock clock always starts from the Unix
+epoch (midnight UTC on Jan 1, 1970).
+
+
+### Controlling time
+
+The mock clock provides the same functions that the standard library's `time`
+package provides. For example, to find the current time, you use the `Now()`
+function:
+
+```go
+mock := clock.NewMock()
+
+// Find the current time.
+mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC
+
+// Move the clock forward.
+mock.Add(2 * time.Hour)
+
+// Check the time again. It's 2 hours later!
+mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC
+```
+
+Timers and Tickers are also controlled by this same mock clock. They will only
+execute when the clock is moved forward:
+
+```go
+mock := clock.NewMock()
+count := 0
+
+// Kick off a timer to increment every 1 mock second.
+go func() {
+ ticker := mock.Ticker(1 * time.Second)
+ for {
+ <-ticker.C
+ count++
+ }
+}()
+runtime.Gosched()
+
+// Move the clock forward 10 seconds.
+mock.Add(10 * time.Second)
+
+// This prints 10.
+fmt.Println(count)
+```
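// Illustrative sketch, not from the vendored README: exercising Mock.Set and
// Mock.WaitForAllTimers, which are defined in clock.go below.
package main

import (
	"fmt"
	"time"

	"github.com/benbjohnson/clock"
)

func main() {
	mock := clock.NewMock()

	done := make(chan struct{})
	mock.AfterFunc(30*time.Second, func() { close(done) })

	// Jump to an absolute instant; timers due at or before it fire along the way.
	mock.Set(time.Unix(60, 0))
	<-done
	fmt.Println(mock.Now().UTC()) // 1970-01-01 00:01:00 +0000 UTC

	// Alternatively, advance until every registered timer has fired and report
	// the resulting mock time.
	fmt.Println(mock.WaitForAllTimers().UTC())
}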
diff --git a/vendor/github.com/benbjohnson/clock/clock.go b/vendor/github.com/benbjohnson/clock/clock.go
new file mode 100644
index 0000000000..14ddc0795b
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/clock.go
@@ -0,0 +1,422 @@
+package clock
+
+import (
+ "context"
+ "sort"
+ "sync"
+ "time"
+)
+
+// Re-export of time.Duration
+type Duration = time.Duration
+
+// Clock represents an interface to the functions in the standard library time
+// package. Two implementations are available in the clock package. The first
+// is a real-time clock which simply wraps the time package's functions. The
+// second is a mock clock which will only change when
+// programmatically adjusted.
+type Clock interface {
+ After(d time.Duration) <-chan time.Time
+ AfterFunc(d time.Duration, f func()) *Timer
+ Now() time.Time
+ Since(t time.Time) time.Duration
+ Until(t time.Time) time.Duration
+ Sleep(d time.Duration)
+ Tick(d time.Duration) <-chan time.Time
+ Ticker(d time.Duration) *Ticker
+ Timer(d time.Duration) *Timer
+ WithDeadline(parent context.Context, d time.Time) (context.Context, context.CancelFunc)
+ WithTimeout(parent context.Context, t time.Duration) (context.Context, context.CancelFunc)
+}
+
+// New returns an instance of a real-time clock.
+func New() Clock {
+ return &clock{}
+}
+
+// clock implements a real-time clock by simply wrapping the time package functions.
+type clock struct{}
+
+func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) }
+
+func (c *clock) AfterFunc(d time.Duration, f func()) *Timer {
+ return &Timer{timer: time.AfterFunc(d, f)}
+}
+
+func (c *clock) Now() time.Time { return time.Now() }
+
+func (c *clock) Since(t time.Time) time.Duration { return time.Since(t) }
+
+func (c *clock) Until(t time.Time) time.Duration { return time.Until(t) }
+
+func (c *clock) Sleep(d time.Duration) { time.Sleep(d) }
+
+func (c *clock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) }
+
+func (c *clock) Ticker(d time.Duration) *Ticker {
+ t := time.NewTicker(d)
+ return &Ticker{C: t.C, ticker: t}
+}
+
+func (c *clock) Timer(d time.Duration) *Timer {
+ t := time.NewTimer(d)
+ return &Timer{C: t.C, timer: t}
+}
+
+func (c *clock) WithDeadline(parent context.Context, d time.Time) (context.Context, context.CancelFunc) {
+ return context.WithDeadline(parent, d)
+}
+
+func (c *clock) WithTimeout(parent context.Context, t time.Duration) (context.Context, context.CancelFunc) {
+ return context.WithTimeout(parent, t)
+}
+
+// Mock represents a mock clock that only moves forward programmatically.
+// It can be preferable to a real-time clock when testing time-based functionality.
+type Mock struct {
+ // mu protects all other fields in this struct, and the data that they
+ // point to.
+ mu sync.Mutex
+
+ now time.Time // current time
+ timers clockTimers // tickers & timers
+}
+
+// NewMock returns an instance of a mock clock.
+// The current time of the mock clock on initialization is the Unix epoch.
+func NewMock() *Mock {
+ return &Mock{now: time.Unix(0, 0)}
+}
+
+// Add moves the current time of the mock clock forward by the specified duration.
+// This should only be called from a single goroutine at a time.
+func (m *Mock) Add(d time.Duration) {
+ // Calculate the final current time.
+ m.mu.Lock()
+ t := m.now.Add(d)
+ m.mu.Unlock()
+
+ // Continue to execute timers until there are no more before the new time.
+ for {
+ if !m.runNextTimer(t) {
+ break
+ }
+ }
+
+ // Ensure that we end with the new time.
+ m.mu.Lock()
+ m.now = t
+ m.mu.Unlock()
+
+ // Give a small buffer to make sure that other goroutines get handled.
+ gosched()
+}
+
+// Set sets the current time of the mock clock to a specific one.
+// This should only be called from a single goroutine at a time.
+func (m *Mock) Set(t time.Time) {
+ // Continue to execute timers until there are no more before the new time.
+ for {
+ if !m.runNextTimer(t) {
+ break
+ }
+ }
+
+ // Ensure that we end with the new time.
+ m.mu.Lock()
+ m.now = t
+ m.mu.Unlock()
+
+ // Give a small buffer to make sure that other goroutines get handled.
+ gosched()
+}
+
+// WaitForAllTimers advances the clock until all timers have expired.
+func (m *Mock) WaitForAllTimers() time.Time {
+ // Continue to execute timers until there are no more
+ for {
+ m.mu.Lock()
+ if len(m.timers) == 0 {
+ m.mu.Unlock()
+ return m.Now()
+ }
+
+ sort.Sort(m.timers)
+ next := m.timers[len(m.timers)-1].Next()
+ m.mu.Unlock()
+ m.Set(next)
+ }
+}
+
+// runNextTimer executes the next timer in chronological order and moves the
+// current time to the timer's next tick time. The next timer is not executed if
+// its next tick time is after the max time. Returns true if a timer was executed.
+func (m *Mock) runNextTimer(max time.Time) bool {
+ m.mu.Lock()
+
+ // Sort timers by time.
+ sort.Sort(m.timers)
+
+ // If we have no more timers then exit.
+ if len(m.timers) == 0 {
+ m.mu.Unlock()
+ return false
+ }
+
+ // Retrieve next timer. Exit if next tick is after new time.
+ t := m.timers[0]
+ if t.Next().After(max) {
+ m.mu.Unlock()
+ return false
+ }
+
+ // Move "now" forward and unlock clock.
+ m.now = t.Next()
+ now := m.now
+ m.mu.Unlock()
+
+ // Execute timer.
+ t.Tick(now)
+ return true
+}
+
+// After waits for the duration to elapse and then sends the current time on the returned channel.
+func (m *Mock) After(d time.Duration) <-chan time.Time {
+ return m.Timer(d).C
+}
+
+// AfterFunc waits for the duration to elapse and then executes a function in its own goroutine.
+// A Timer is returned that can be stopped.
+func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ ch := make(chan time.Time, 1)
+ t := &Timer{
+ c: ch,
+ fn: f,
+ mock: m,
+ next: m.now.Add(d),
+ stopped: false,
+ }
+ m.timers = append(m.timers, (*internalTimer)(t))
+ return t
+}
+
+// Now returns the current wall time on the mock clock.
+func (m *Mock) Now() time.Time {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.now
+}
+
+// Since returns time since `t` using the mock clock's wall time.
+func (m *Mock) Since(t time.Time) time.Duration {
+ return m.Now().Sub(t)
+}
+
+// Until returns time until `t` using the mock clock's wall time.
+func (m *Mock) Until(t time.Time) time.Duration {
+ return t.Sub(m.Now())
+}
+
+// Sleep pauses the goroutine for the given duration on the mock clock.
+// The clock must be moved forward in a separate goroutine.
+func (m *Mock) Sleep(d time.Duration) {
+ <-m.After(d)
+}
+
+// Tick is a convenience function for Ticker().
+// It will return a ticker channel that cannot be stopped.
+func (m *Mock) Tick(d time.Duration) <-chan time.Time {
+ return m.Ticker(d).C
+}
+
+// Ticker creates a new instance of Ticker.
+func (m *Mock) Ticker(d time.Duration) *Ticker {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ ch := make(chan time.Time, 1)
+ t := &Ticker{
+ C: ch,
+ c: ch,
+ mock: m,
+ d: d,
+ next: m.now.Add(d),
+ }
+ m.timers = append(m.timers, (*internalTicker)(t))
+ return t
+}
+
+// Timer creates a new instance of Timer.
+func (m *Mock) Timer(d time.Duration) *Timer {
+ m.mu.Lock()
+ ch := make(chan time.Time, 1)
+ t := &Timer{
+ C: ch,
+ c: ch,
+ mock: m,
+ next: m.now.Add(d),
+ stopped: false,
+ }
+ m.timers = append(m.timers, (*internalTimer)(t))
+ now := m.now
+ m.mu.Unlock()
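+	// Fire the next due timer, if any, so that a zero or negative duration
+	// expires immediately.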
+ m.runNextTimer(now)
+ return t
+}
+
+// removeClockTimer removes a timer from m.timers. m.mu MUST be held
+// when this method is called.
+func (m *Mock) removeClockTimer(t clockTimer) {
+ for i, timer := range m.timers {
+ if timer == t {
+ copy(m.timers[i:], m.timers[i+1:])
+ m.timers[len(m.timers)-1] = nil
+ m.timers = m.timers[:len(m.timers)-1]
+ break
+ }
+ }
+ sort.Sort(m.timers)
+}
+
+// clockTimer represents an object with an associated start time.
+type clockTimer interface {
+ Next() time.Time
+ Tick(time.Time)
+}
+
+// clockTimers represents a list of sortable timers.
+type clockTimers []clockTimer
+
+func (a clockTimers) Len() int { return len(a) }
+func (a clockTimers) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a clockTimers) Less(i, j int) bool { return a[i].Next().Before(a[j].Next()) }
+
+// Timer represents a single event.
+// The current time will be sent on C, unless the timer was created by AfterFunc.
+type Timer struct {
+ C <-chan time.Time
+ c chan time.Time
+ timer *time.Timer // realtime impl, if set
+ next time.Time // next tick time
+ mock *Mock // mock clock, if set
+ fn func() // AfterFunc function, if set
+ stopped bool // True if stopped, false if running
+}
+
+// Stop turns off the timer. It reports whether the call stopped a running timer.
+func (t *Timer) Stop() bool {
+ if t.timer != nil {
+ return t.timer.Stop()
+ }
+
+ t.mock.mu.Lock()
+ registered := !t.stopped
+ t.mock.removeClockTimer((*internalTimer)(t))
+ t.stopped = true
+ t.mock.mu.Unlock()
+ return registered
+}
+
+// Reset changes the expiry time of the timer. It reports whether the timer
+// was active before the reset.
+func (t *Timer) Reset(d time.Duration) bool {
+ if t.timer != nil {
+ return t.timer.Reset(d)
+ }
+
+ t.mock.mu.Lock()
+ t.next = t.mock.now.Add(d)
+ defer t.mock.mu.Unlock()
+
+ registered := !t.stopped
+ if t.stopped {
+ t.mock.timers = append(t.mock.timers, (*internalTimer)(t))
+ }
+
+ t.stopped = false
+ return registered
+}
+
+type internalTimer Timer
+
+func (t *internalTimer) Next() time.Time { return t.next }
+func (t *internalTimer) Tick(now time.Time) {
+ // a gosched() after ticking, to allow any consequences of the
+ // tick to complete
+ defer gosched()
+
+ t.mock.mu.Lock()
+ if t.fn != nil {
+		// defer function execution until the lock is released
+ defer func() { go t.fn() }()
+ } else {
+ t.c <- now
+ }
+ t.mock.removeClockTimer((*internalTimer)(t))
+ t.stopped = true
+ t.mock.mu.Unlock()
+}
+
+// Ticker holds a channel that receives "ticks" at regular intervals.
+type Ticker struct {
+ C <-chan time.Time
+ c chan time.Time
+ ticker *time.Ticker // realtime impl, if set
+ next time.Time // next tick time
+ mock *Mock // mock clock, if set
+ d time.Duration // time between ticks
+ stopped bool // True if stopped, false if running
+}
+
+// Stop turns off the ticker.
+func (t *Ticker) Stop() {
+ if t.ticker != nil {
+ t.ticker.Stop()
+ } else {
+ t.mock.mu.Lock()
+ t.mock.removeClockTimer((*internalTicker)(t))
+ t.stopped = true
+ t.mock.mu.Unlock()
+ }
+}
+
+// Reset resets the ticker to a new duration.
+func (t *Ticker) Reset(dur time.Duration) {
+ if t.ticker != nil {
+ t.ticker.Reset(dur)
+ return
+ }
+
+ t.mock.mu.Lock()
+ defer t.mock.mu.Unlock()
+
+ if t.stopped {
+ t.mock.timers = append(t.mock.timers, (*internalTicker)(t))
+ t.stopped = false
+ }
+
+ t.d = dur
+ t.next = t.mock.now.Add(dur)
+}
+
+type internalTicker Ticker
+
+func (t *internalTicker) Next() time.Time { return t.next }
+func (t *internalTicker) Tick(now time.Time) {
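+	// Non-blocking send: drop the tick if the receiver is not ready,
+	// mirroring the behavior of the standard library's time.Ticker.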
+ select {
+ case t.c <- now:
+ default:
+ }
+ t.mock.mu.Lock()
+ t.next = now.Add(t.d)
+ t.mock.mu.Unlock()
+ gosched()
+}
+
+// Sleep momentarily so that other goroutines can process.
+func gosched() { time.Sleep(1 * time.Millisecond) }
+
+var (
+ // type checking
+ _ Clock = &Mock{}
+)
diff --git a/vendor/github.com/benbjohnson/clock/context.go b/vendor/github.com/benbjohnson/clock/context.go
new file mode 100644
index 0000000000..eb67594f2c
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/context.go
@@ -0,0 +1,86 @@
+package clock
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+)
+
+func (m *Mock) WithTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
+ return m.WithDeadline(parent, m.Now().Add(timeout))
+}
+
+func (m *Mock) WithDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return context.WithCancel(parent)
+ }
+ ctx := &timerCtx{clock: m, parent: parent, deadline: deadline, done: make(chan struct{})}
+ propagateCancel(parent, ctx)
+ dur := m.Until(deadline)
+ if dur <= 0 {
+ ctx.cancel(context.DeadlineExceeded) // deadline has already passed
+ return ctx, func() {}
+ }
+ ctx.Lock()
+ defer ctx.Unlock()
+ if ctx.err == nil {
+ ctx.timer = m.AfterFunc(dur, func() {
+ ctx.cancel(context.DeadlineExceeded)
+ })
+ }
+ return ctx, func() { ctx.cancel(context.Canceled) }
+}
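+
+// Illustrative sketch (assumed usage, not part of the upstream package): a
+// deadline can be exercised in tests by advancing the mock clock past it.
+//
+//	mock := clock.NewMock()
+//	ctx, cancel := mock.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	mock.Add(2 * time.Second) // the AfterFunc cancels ctx with context.DeadlineExceeded
+//	<-ctx.Done()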
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent context.Context, child *timerCtx) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(parent.Err())
+ case <-child.Done():
+ }
+ }()
+}
+
+type timerCtx struct {
+ sync.Mutex
+
+ clock Clock
+ parent context.Context
+ deadline time.Time
+ done chan struct{}
+
+ err error
+ timer *Timer
+}
+
+func (c *timerCtx) cancel(err error) {
+ c.Lock()
+ defer c.Unlock()
+ if c.err != nil {
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { return c.deadline, true }
+
+func (c *timerCtx) Done() <-chan struct{} { return c.done }
+
+func (c *timerCtx) Err() error { return c.err }
+
+func (c *timerCtx) Value(key interface{}) interface{} { return c.parent.Value(key) }
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("clock.WithDeadline(%s [%s])", c.deadline, c.deadline.Sub(c.clock.Now()))
+}
diff --git a/vendor/github.com/briandowns/spinner/.gitignore b/vendor/github.com/briandowns/spinner/.gitignore
new file mode 100644
index 0000000000..21ec6b71b7
--- /dev/null
+++ b/vendor/github.com/briandowns/spinner/.gitignore
@@ -0,0 +1,29 @@
+# Created by .gitignore support plugin (hsz.mobi)
+### Go template
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+.idea
+*.iml
diff --git a/vendor/github.com/briandowns/spinner/.travis.yml b/vendor/github.com/briandowns/spinner/.travis.yml
new file mode 100644
index 0000000000..74d205aec0
--- /dev/null
+++ b/vendor/github.com/briandowns/spinner/.travis.yml
@@ -0,0 +1,18 @@
+arch:
+ - amd64
+ - ppc64le
+language: go
+go:
+ - 1.16
+ - 1.17.5
+env:
+ - GOARCH: amd64
+ - GOARCH: 386
+script:
+ - go test -v
+notifications:
+ email:
+ recipients:
+ - brian.downs@gmail.com
+ on_success: change
+ on_failure: always
diff --git a/vendor/github.com/briandowns/spinner/LICENSE b/vendor/github.com/briandowns/spinner/LICENSE
new file mode 100644
index 0000000000..dd5b3a58aa
--- /dev/null
+++ b/vendor/github.com/briandowns/spinner/LICENSE
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/briandowns/spinner/Makefile b/vendor/github.com/briandowns/spinner/Makefile
new file mode 100644
index 0000000000..3cfdeb23c6
--- /dev/null
+++ b/vendor/github.com/briandowns/spinner/Makefile
@@ -0,0 +1,20 @@
+GO = go
+
+.PHONY: deps
+deps: go.mod
+
+go.mod:
+ go mod init
+ go mod tidy
+
+.PHONY: test
+test:
+ $(GO) test -v -cover ./...
+
+.PHONY: check
+check:
+ if [ -d vendor ]; then cp -r vendor/* ${GOPATH}/src/; fi
+
+.PHONY: clean
+clean:
+ $(GO) clean
diff --git a/vendor/github.com/briandowns/spinner/NOTICE.txt b/vendor/github.com/briandowns/spinner/NOTICE.txt
new file mode 100644
index 0000000000..95e2a248b0
--- /dev/null
+++ b/vendor/github.com/briandowns/spinner/NOTICE.txt
@@ -0,0 +1,4 @@
+Spinner
+Copyright (c) 2022 Brian J. Downs
+This product is licensed to you under the Apache 2.0 license (the "License"). You may not use this product except in compliance with the Apache 2.0 License.
+This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file.
diff --git a/vendor/github.com/briandowns/spinner/README.md b/vendor/github.com/briandowns/spinner/README.md
new file mode 100644
index 0000000000..28b024d323
--- /dev/null
+++ b/vendor/github.com/briandowns/spinner/README.md
@@ -0,0 +1,285 @@
+# Spinner
+
+[![GoDoc](https://godoc.org/github.com/briandowns/spinner?status.svg)](https://godoc.org/github.com/briandowns/spinner) [![CircleCI](https://circleci.com/gh/briandowns/spinner.svg?style=svg)](https://circleci.com/gh/briandowns/spinner)
+
+spinner is a simple package to add a spinner / progress indicator to any terminal application. Examples can be found below as well as full examples in the examples directory.
+
+For more detail about the library and its features, refer to your local godoc once installed.
+
+Contributions welcome!
+
+## Installation
+
+```bash
+go get github.com/briandowns/spinner
+```
+
+## Available Character Sets
+
+90 Character Sets. Some examples below:
+
+(Numbered by their slice index)
+
+| index | character set | sample gif |
+| ----- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
+| 0 | ```←↖↑↗→↘↓↙``` | ![Sample Gif](gifs/0.gif) |
+| 1 | ```▁▃▄▅▆▇█▇▆▅▄▃▁``` | ![Sample Gif](gifs/1.gif) |
+| 2 | ```▖▘▝▗``` | ![Sample Gif](gifs/2.gif) |
+| 3 | ```┤┘┴└├┌┬┐``` | ![Sample Gif](gifs/3.gif) |
+| 4 | ```◢◣◤◥``` | ![Sample Gif](gifs/4.gif) |
+| 5 | ```◰◳◲◱``` | ![Sample Gif](gifs/5.gif) |
+| 6 | ```◴◷◶◵``` | ![Sample Gif](gifs/6.gif) |
+| 7 | ```◐◓◑◒``` | ![Sample Gif](gifs/7.gif) |
+| 8 | ```.oO@*``` | ![Sample Gif](gifs/8.gif) |
+| 9 | ```\|/-\``` | ![Sample Gif](gifs/9.gif) |
+| 10 | ```◡◡⊙⊙◠◠``` | ![Sample Gif](gifs/10.gif) |
+| 11 | ```⣾⣽⣻⢿⡿⣟⣯⣷``` | ![Sample Gif](gifs/11.gif) |
+| 12 | ```>))'> >))'> >))'> >))'> >))'> <'((< <'((< <'((<``` | ![Sample Gif](gifs/12.gif) |
+| 13 | ```⠁⠂⠄⡀⢀⠠⠐⠈``` | ![Sample Gif](gifs/13.gif) |
+| 14 | ```⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏``` | ![Sample Gif](gifs/14.gif) |
+| 15 | ```abcdefghijklmnopqrstuvwxyz``` | ![Sample Gif](gifs/15.gif) |
+| 16 | ```▉▊▋▌▍▎▏▎▍▌▋▊▉``` | ![Sample Gif](gifs/16.gif) |
+| 17 | ```■□▪▫``` | ![Sample Gif](gifs/17.gif) |
+| 18 | ```←↑→↓``` | ![Sample Gif](gifs/18.gif) |
+| 19 | ```╫╪``` | ![Sample Gif](gifs/19.gif) |
+| 20 | ```⇐⇖⇑⇗⇒⇘⇓⇙``` | ![Sample Gif](gifs/20.gif) |
+| 21 | ```⠁⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈⠈``` | ![Sample Gif](gifs/21.gif) |
+| 22 | ```⠈⠉⠋⠓⠒⠐⠐⠒⠖⠦⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈``` | ![Sample Gif](gifs/22.gif) |
+| 23 | ```⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠴⠲⠒⠂⠂⠒⠚⠙⠉⠁``` | ![Sample Gif](gifs/23.gif) |
+| 24 | ```⠋⠙⠚⠒⠂⠂⠒⠲⠴⠦⠖⠒⠐⠐⠒⠓⠋``` | ![Sample Gif](gifs/24.gif) |
+| 25 | ```ヲァィゥェォャュョッアイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワン``` | ![Sample Gif](gifs/25.gif) |
+| 26 | ```. .. ...``` | ![Sample Gif](gifs/26.gif) |
+| 27 | ```▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▏▎▍▌▋▊▉█▇▆▅▄▃▂▁``` | ![Sample Gif](gifs/27.gif) |
+| 28 | ```.oO°Oo.``` | ![Sample Gif](gifs/28.gif) |
+| 29 | ```+x``` | ![Sample Gif](gifs/29.gif) |
+| 30 | ```v<^>``` | ![Sample Gif](gifs/30.gif) |
+| 31 | ```>>---> >>---> >>---> >>---> >>---> <---<< <---<< <---<< <---<< <---<<``` | ![Sample Gif](gifs/31.gif) |
+| 32 | ```\| \|\| \|\|\| \|\|\|\| \|\|\|\|\| \|\|\|\|\|\| \|\|\|\|\| \|\|\|\| \|\|\| \|\| \|``` | ![Sample Gif](gifs/32.gif) |
+| 33 | ```[] [=] [==] [===] [====] [=====] [======] [=======] [========] [=========] [==========]``` | ![Sample Gif](gifs/33.gif) |
+| 34 | ```(*---------) (-*--------) (--*-------) (---*------) (----*-----) (-----*----) (------*---) (-------*--) (--------*-) (---------*)``` | ![Sample Gif](gifs/34.gif) |
+| 35 | ```█▒▒▒▒▒▒▒▒▒ ███▒▒▒▒▒▒▒ █████▒▒▒▒▒ ███████▒▒▒ ██████████``` | ![Sample Gif](gifs/35.gif) |
+| 36 | ```[ ] [=> ] [===> ] [=====> ] [======> ] [========> ] [==========> ] [============> ] [==============> ] [================> ] [==================> ] [===================>]``` | ![Sample Gif](gifs/36.gif) |
+| 37 | ```🕐 🕑 🕒 🕓 🕔 🕕 🕖 🕗 🕘 🕙 🕚 🕛``` | ![Sample Gif](gifs/37.gif) |
+| 38 | ```🕐 🕜 🕑 🕝 🕒 🕞 🕓 🕟 🕔 🕠 🕕 🕡 🕖 🕢 🕗 🕣 🕘 🕤 🕙 🕥 🕚 🕦 🕛 🕧``` | ![Sample Gif](gifs/38.gif) |
+| 39 | ```🌍 🌎 🌏``` | ![Sample Gif](gifs/39.gif) |
+| 40 | ```◜ ◝ ◞ ◟``` | ![Sample Gif](gifs/40.gif) |
+| 41 | ```⬒ ⬔ ⬓ ⬕``` | ![Sample Gif](gifs/41.gif) |
+| 42 | ```⬖ ⬘ ⬗ ⬙``` | ![Sample Gif](gifs/42.gif) |
+| 43 | ```[>>> >] []>>>> [] [] >>>> [] [] >>>> [] [] >>>> [] [] >>>>[] [>> >>]``` | ![Sample Gif](gifs/43.gif) |
+
+## Features
+
+* Start
+* Stop
+* Restart
+* Reverse direction
+* Update the spinner character set
+* Update the spinner speed
+* Prefix or append text
+* Change spinner color, background, and text attributes such as bold / italics
+* Get spinner status
+* Chain, pipe, redirect output
+* Output final string on spinner/indicator completion
+
+## Examples
+
+```Go
+package main
+
+import (
+ "github.com/briandowns/spinner"
+ "time"
+)
+
+func main() {
+ s := spinner.New(spinner.CharSets[9], 100*time.Millisecond) // Build our new spinner
+ s.Start() // Start the spinner
+ time.Sleep(4 * time.Second) // Run for some time to simulate work
+ s.Stop()
+}
+```
+
+## Update the character set and restart the spinner
+
+```Go
+s.UpdateCharSet(spinner.CharSets[1]) // Update spinner to use a different character set
+s.Restart() // Restart the spinner
+time.Sleep(4 * time.Second)
+s.Stop()
+```
+
+## Update spin speed and restart the spinner
+
+```Go
+s.UpdateSpeed(200 * time.Millisecond) // Update the speed the spinner spins at
+s.Restart()
+time.Sleep(4 * time.Second)
+s.Stop()
+```
+
+## Reverse the direction of the spinner
+
+```Go
+s.Reverse() // Reverse the direction the spinner is spinning
+s.Restart()
+time.Sleep(4 * time.Second)
+s.Stop()
+```
+
+## Provide your own spinner
+
+(or send me an issue or pull request to add to the project)
+
+```Go
+someSet := []string{"+", "-"}
+s := spinner.New(someSet, 100*time.Millisecond)
+```
+
+## Prefix or append text to the spinner
+
+```Go
+s.Prefix = "prefixed text: " // Prefix text before the spinner
+s.Suffix = " :appended text" // Append text after the spinner
+```
+
+## Set or change the color of the spinner
+
+The default color is white. The spinner needs to be restarted to pick up the change.
+
+```Go
+s.Color("red") // Set the spinner color to red
+```
+
+You can specify both the background and foreground color, as well as additional attributes such as `bold` or `underline`.
+
+```Go
+s.Color("red", "bold") // Set the spinner color to a bold red
+```
+
+To set the background to black, the foreground to a bold red:
+
+```Go
+s.Color("bgBlack", "bold", "fgRed")
+```
+
+Below is the full color and attribute list:
+
+```Go
+// default colors
+red
+black
+green
+yellow
+blue
+magenta
+cyan
+white
+
+// attributes
+reset
+bold
+faint
+italic
+underline
+blinkslow
+blinkrapid
+reversevideo
+concealed
+crossedout
+
+// foreground text
+fgBlack
+fgRed
+fgGreen
+fgYellow
+fgBlue
+fgMagenta
+fgCyan
+fgWhite
+
+// foreground Hi-Intensity text
+fgHiBlack
+fgHiRed
+fgHiGreen
+fgHiYellow
+fgHiBlue
+fgHiMagenta
+fgHiCyan
+fgHiWhite
+
+// background text
+bgBlack
+bgRed
+bgGreen
+bgYellow
+bgBlue
+bgMagenta
+bgCyan
+bgWhite
+
+// background Hi-Intensity text
+bgHiBlack
+bgHiRed
+bgHiGreen
+bgHiYellow
+bgHiBlue
+bgHiMagenta
+bgHiCyan
+bgHiWhite
+```
+
+## Generate a sequence of numbers
+
+```Go
+setOfDigits := spinner.GenerateNumberSequence(25) // Generate a 25 digit string of numbers
+s := spinner.New(setOfDigits, 100*time.Millisecond)
+```
+
+## Get spinner status
+
+```Go
+fmt.Println(s.Active())
+```
+
+## Unix pipe and redirect
+
+Feature suggested and written up by [dekz](https://github.com/dekz)
+
+Setting the spinner's Writer to Stderr shows progress to the user while still allowing the program's output to be chained, piped, or redirected.
+
+This is the preferred method of setting a Writer at this time.
+
+```go
+s := spinner.New(spinner.CharSets[11], 100*time.Millisecond, spinner.WithWriter(os.Stderr))
+s.Suffix = " Encrypting data..."
+s.Start()
+// Encrypt the data into ciphertext
+fmt.Fprintln(os.Stdout, ciphertext)
+```
+
+```sh
+> myprog encrypt "Secret text" > encrypted.txt
+⣯ Encrypting data...
+```
+
+```sh
+> cat encrypted.txt
+1243hjkbas23i9ah27sj39jghv237n2oa93hg83
+```
+
+## Final String Output
+
+Add additional output when the spinner/indicator has completed. The "final" output string can be multi-line and will be written to wherever the `io.Writer` has been configured.
+
+```Go
+s := spinner.New(spinner.CharSets[9], 100*time.Millisecond)
+s.FinalMSG = "Complete!\nNew line!\nAnother one!\n"
+s.Start()
+time.Sleep(4 * time.Second)
+s.Stop()
+```
+
+Output
+```sh
+Complete!
+New line!
+Another one!
+```
diff --git a/vendor/github.com/briandowns/spinner/character_sets.go b/vendor/github.com/briandowns/spinner/character_sets.go
new file mode 100644
index 0000000000..df41a0f2c5
--- /dev/null
+++ b/vendor/github.com/briandowns/spinner/character_sets.go
@@ -0,0 +1,121 @@
+// Copyright (c) 2022 Brian J. Downs
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spinner
+
+const (
+ clockOneOClock = '\U0001F550'
+ clockOneThirty = '\U0001F55C'
+)
+
+// CharSets contains the available character sets
+var CharSets = map[int][]string{
+ 0: {"←", "↖", "↑", "↗", "→", "↘", "↓", "↙"},
+ 1: {"▁", "▃", "▄", "▅", "▆", "▇", "█", "▇", "▆", "▅", "▄", "▃", "▁"},
+ 2: {"▖", "▘", "▝", "▗"},
+ 3: {"┤", "┘", "┴", "└", "├", "┌", "┬", "┐"},
+ 4: {"◢", "◣", "◤", "◥"},
+ 5: {"◰", "◳", "◲", "◱"},
+ 6: {"◴", "◷", "◶", "◵"},
+ 7: {"◐", "◓", "◑", "◒"},
+ 8: {".", "o", "O", "@", "*"},
+ 9: {"|", "/", "-", "\\"},
+ 10: {"◡◡", "⊙⊙", "◠◠"},
+ 11: {"⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"},
+ 12: {">))'>", " >))'>", " >))'>", " >))'>", " >))'>", " <'((<", " <'((<", " <'((<"},
+ 13: {"⠁", "⠂", "⠄", "⡀", "⢀", "⠠", "⠐", "⠈"},
+ 14: {"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"},
+ 15: {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"},
+ 16: {"▉", "▊", "▋", "▌", "▍", "▎", "▏", "▎", "▍", "▌", "▋", "▊", "▉"},
+ 17: {"■", "□", "▪", "▫"},
+
+ 18: {"←", "↑", "→", "↓"},
+ 19: {"╫", "╪"},
+ 20: {"⇐", "⇖", "⇑", "⇗", "⇒", "⇘", "⇓", "⇙"},
+ 21: {"⠁", "⠁", "⠉", "⠙", "⠚", "⠒", "⠂", "⠂", "⠒", "⠲", "⠴", "⠤", "⠄", "⠄", "⠤", "⠠", "⠠", "⠤", "⠦", "⠖", "⠒", "⠐", "⠐", "⠒", "⠓", "⠋", "⠉", "⠈", "⠈"},
+ 22: {"⠈", "⠉", "⠋", "⠓", "⠒", "⠐", "⠐", "⠒", "⠖", "⠦", "⠤", "⠠", "⠠", "⠤", "⠦", "⠖", "⠒", "⠐", "⠐", "⠒", "⠓", "⠋", "⠉", "⠈"},
+ 23: {"⠁", "⠉", "⠙", "⠚", "⠒", "⠂", "⠂", "⠒", "⠲", "⠴", "⠤", "⠄", "⠄", "⠤", "⠴", "⠲", "⠒", "⠂", "⠂", "⠒", "⠚", "⠙", "⠉", "⠁"},
+ 24: {"⠋", "⠙", "⠚", "⠒", "⠂", "⠂", "⠒", "⠲", "⠴", "⠦", "⠖", "⠒", "⠐", "⠐", "⠒", "⠓", "⠋"},
+ 25: {"ヲ", "ァ", "ィ", "ゥ", "ェ", "ォ", "ャ", "ュ", "ョ", "ッ", "ア", "イ", "ウ", "エ", "オ", "カ", "キ", "ク", "ケ", "コ", "サ", "シ", "ス", "セ", "ソ", "タ", "チ", "ツ", "テ", "ト", "ナ", "ニ", "ヌ", "ネ", "ノ", "ハ", "ヒ", "フ", "ヘ", "ホ", "マ", "ミ", "ム", "メ", "モ", "ヤ", "ユ", "ヨ", "ラ", "リ", "ル", "レ", "ロ", "ワ", "ン"},
+ 26: {".", "..", "..."},
+ 27: {"▁", "▂", "▃", "▄", "▅", "▆", "▇", "█", "▉", "▊", "▋", "▌", "▍", "▎", "▏", "▏", "▎", "▍", "▌", "▋", "▊", "▉", "█", "▇", "▆", "▅", "▄", "▃", "▂", "▁"},
+ 28: {".", "o", "O", "°", "O", "o", "."},
+ 29: {"+", "x"},
+ 30: {"v", "<", "^", ">"},
+ 31: {">>--->", " >>--->", " >>--->", " >>--->", " >>--->", " <---<<", " <---<<", " <---<<", " <---<<", "<---<<"},
+ 32: {"|", "||", "|||", "||||", "|||||", "|||||||", "||||||||", "|||||||", "||||||", "|||||", "||||", "|||", "||", "|"},
+ 33: {"[ ]", "[= ]", "[== ]", "[=== ]", "[==== ]", "[===== ]", "[====== ]", "[======= ]", "[======== ]", "[========= ]", "[==========]"},
+ 34: {"(*---------)", "(-*--------)", "(--*-------)", "(---*------)", "(----*-----)", "(-----*----)", "(------*---)", "(-------*--)", "(--------*-)", "(---------*)"},
+ 35: {"█▒▒▒▒▒▒▒▒▒", "███▒▒▒▒▒▒▒", "█████▒▒▒▒▒", "███████▒▒▒", "██████████"},
+ 36: {"[ ]", "[=> ]", "[===> ]", "[=====> ]", "[======> ]", "[========> ]", "[==========> ]", "[============> ]", "[==============> ]", "[================> ]", "[==================> ]", "[===================>]"},
+ 39: {"🌍", "🌎", "🌏"},
+ 40: {"◜", "◝", "◞", "◟"},
+ 41: {"⬒", "⬔", "⬓", "⬕"},
+ 42: {"⬖", "⬘", "⬗", "⬙"},
+ 43: {"[>>> >]", "[]>>>> []", "[] >>>> []", "[] >>>> []", "[] >>>> []", "[] >>>>[]", "[>> >>]"},
+ 44: {"♠", "♣", "♥", "♦"},
+ 45: {"➞", "➟", "➠", "➡", "➠", "➟"},
+ 46: {" | ", ` \ `, "_ ", ` \ `, " | ", " / ", " _", " / "},
+ 47: {" . . . .", ". . . .", ". . . .", ". . . .", ". . . . ", ". . . . ."},
+ 48: {" | ", " / ", " _ ", ` \ `, " | ", ` \ `, " _ ", " / "},
+ 49: {"⎺", "⎻", "⎼", "⎽", "⎼", "⎻"},
+ 50: {"▹▹▹▹▹", "▸▹▹▹▹", "▹▸▹▹▹", "▹▹▸▹▹", "▹▹▹▸▹", "▹▹▹▹▸"},
+ 51: {"[ ]", "[ =]", "[ ==]", "[ ===]", "[====]", "[=== ]", "[== ]", "[= ]"},
+ 52: {"( ● )", "( ● )", "( ● )", "( ● )", "( ●)", "( ● )", "( ● )", "( ● )", "( ● )"},
+ 53: {"✶", "✸", "✹", "✺", "✹", "✷"},
+ 54: {"▐|\\____________▌", "▐_|\\___________▌", "▐__|\\__________▌", "▐___|\\_________▌", "▐____|\\________▌", "▐_____|\\_______▌", "▐______|\\______▌", "▐_______|\\_____▌", "▐________|\\____▌", "▐_________|\\___▌", "▐__________|\\__▌", "▐___________|\\_▌", "▐____________|\\▌", "▐____________/|▌", "▐___________/|_▌", "▐__________/|__▌", "▐_________/|___▌", "▐________/|____▌", "▐_______/|_____▌", "▐______/|______▌", "▐_____/|_______▌", "▐____/|________▌", "▐___/|_________▌", "▐__/|__________▌", "▐_/|___________▌", "▐/|____________▌"},
+ 55: {"▐⠂ ▌", "▐⠈ ▌", "▐ ⠂ ▌", "▐ ⠠ ▌", "▐ ⡀ ▌", "▐ ⠠ ▌", "▐ ⠂ ▌", "▐ ⠈ ▌", "▐ ⠂ ▌", "▐ ⠠ ▌", "▐ ⡀ ▌", "▐ ⠠ ▌", "▐ ⠂ ▌", "▐ ⠈ ▌", "▐ ⠂▌", "▐ ⠠▌", "▐ ⡀▌", "▐ ⠠ ▌", "▐ ⠂ ▌", "▐ ⠈ ▌", "▐ ⠂ ▌", "▐ ⠠ ▌", "▐ ⡀ ▌", "▐ ⠠ ▌", "▐ ⠂ ▌", "▐ ⠈ ▌", "▐ ⠂ ▌", "▐ ⠠ ▌", "▐ ⡀ ▌", "▐⠠ ▌"},
+ 56: {"¿", "?"},
+ 57: {"⢹", "⢺", "⢼", "⣸", "⣇", "⡧", "⡗", "⡏"},
+ 58: {"⢄", "⢂", "⢁", "⡁", "⡈", "⡐", "⡠"},
+ 59: {". ", ".. ", "...", " ..", " .", " "},
+ 60: {".", "o", "O", "°", "O", "o", "."},
+ 61: {"▓", "▒", "░"},
+ 62: {"▌", "▀", "▐", "▄"},
+ 63: {"⊶", "⊷"},
+ 64: {"▪", "▫"},
+ 65: {"□", "■"},
+ 66: {"▮", "▯"},
+ 67: {"-", "=", "≡"},
+ 68: {"d", "q", "p", "b"},
+ 69: {"∙∙∙", "●∙∙", "∙●∙", "∙∙●", "∙∙∙"},
+ 70: {"🌑 ", "🌒 ", "🌓 ", "🌔 ", "🌕 ", "🌖 ", "🌗 ", "🌘 "},
+ 71: {"☗", "☖"},
+ 72: {"⧇", "⧆"},
+ 73: {"◉", "◎"},
+ 74: {"㊂", "㊀", "㊁"},
+ 75: {"⦾", "⦿"},
+ 76: {"ဝ", "၀"},
+ 77: {"▌", "▀", "▐▄"},
+ 78: {"⠈⠁", "⠈⠑", "⠈⠱", "⠈⡱", "⢀⡱", "⢄⡱", "⢄⡱", "⢆⡱", "⢎⡱", "⢎⡰", "⢎⡠", "⢎⡀", "⢎⠁", "⠎⠁", "⠊⠁"},
+ 79: {"________", "-_______", "_-______", "__-_____", "___-____", "____-___", "_____-__", "______-_", "_______-", "________", "_______-", "______-_", "_____-__", "____-___", "___-____", "__-_____", "_-______", "-_______", "________"},
+ 80: {"|_______", "_/______", "__-_____", "___\\____", "____|___", "_____/__", "______-_", "_______\\", "_______|", "______\\_", "_____-__", "____/___", "___|____", "__\\_____", "_-______"},
+ 81: {"□", "◱", "◧", "▣", "■"},
+ 82: {"□", "◱", "▨", "▩", "■"},
+ 83: {"░", "▒", "▓", "█"},
+ 84: {"░", "█"},
+ 85: {"⚪", "⚫"},
+ 86: {"◯", "⬤"},
+ 87: {"▱", "▰"},
+ 88: {"➊", "➋", "➌", "➍", "➎", "➏", "➐", "➑", "➒", "➓"},
+ 89: {"½", "⅓", "⅔", "¼", "¾", "⅛", "⅜", "⅝", "⅞"},
+ 90: {"↞", "↟", "↠", "↡"},
+}
+
+func init() {
+ for i := rune(0); i < 12; i++ {
+ CharSets[37] = append(CharSets[37], string([]rune{clockOneOClock + i}))
+ CharSets[38] = append(CharSets[38], string([]rune{clockOneOClock + i}), string([]rune{clockOneThirty + i}))
+ }
+}
diff --git a/vendor/github.com/briandowns/spinner/spinner.go b/vendor/github.com/briandowns/spinner/spinner.go
new file mode 100644
index 0000000000..97b1a8f74d
--- /dev/null
+++ b/vendor/github.com/briandowns/spinner/spinner.go
@@ -0,0 +1,557 @@
+// Copyright (c) 2021 Brian J. Downs
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package spinner is a simple package to add a spinner / progress indicator to any terminal application.
+package spinner
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "github.com/fatih/color"
+ "github.com/mattn/go-isatty"
+ "golang.org/x/term"
+)
+
+// errInvalidColor is returned when attempting to set an invalid color
+var errInvalidColor = errors.New("invalid color")
+
+// validColors holds an array of the only colors allowed
+var validColors = map[string]bool{
+ // default colors for backwards compatibility
+ "black": true,
+ "red": true,
+ "green": true,
+ "yellow": true,
+ "blue": true,
+ "magenta": true,
+ "cyan": true,
+ "white": true,
+
+ // attributes
+ "reset": true,
+ "bold": true,
+ "faint": true,
+ "italic": true,
+ "underline": true,
+ "blinkslow": true,
+ "blinkrapid": true,
+ "reversevideo": true,
+ "concealed": true,
+ "crossedout": true,
+
+ // foreground text
+ "fgBlack": true,
+ "fgRed": true,
+ "fgGreen": true,
+ "fgYellow": true,
+ "fgBlue": true,
+ "fgMagenta": true,
+ "fgCyan": true,
+ "fgWhite": true,
+
+ // foreground Hi-Intensity text
+ "fgHiBlack": true,
+ "fgHiRed": true,
+ "fgHiGreen": true,
+ "fgHiYellow": true,
+ "fgHiBlue": true,
+ "fgHiMagenta": true,
+ "fgHiCyan": true,
+ "fgHiWhite": true,
+
+ // background text
+ "bgBlack": true,
+ "bgRed": true,
+ "bgGreen": true,
+ "bgYellow": true,
+ "bgBlue": true,
+ "bgMagenta": true,
+ "bgCyan": true,
+ "bgWhite": true,
+
+ // background Hi-Intensity text
+ "bgHiBlack": true,
+ "bgHiRed": true,
+ "bgHiGreen": true,
+ "bgHiYellow": true,
+ "bgHiBlue": true,
+ "bgHiMagenta": true,
+ "bgHiCyan": true,
+ "bgHiWhite": true,
+}
+
+// isWindows reports whether the OS is Windows; isWindowsTerminalOnWindows
+// additionally requires the WT_SESSION env variable to be set.
+var isWindows = runtime.GOOS == "windows"
+var isWindowsTerminalOnWindows = len(os.Getenv("WT_SESSION")) > 0 && isWindows
+
+// colorAttributeMap maps a valid color name to its corresponding color attribute
+var colorAttributeMap = map[string]color.Attribute{
+ // default colors for backwards compatibility
+ "black": color.FgBlack,
+ "red": color.FgRed,
+ "green": color.FgGreen,
+ "yellow": color.FgYellow,
+ "blue": color.FgBlue,
+ "magenta": color.FgMagenta,
+ "cyan": color.FgCyan,
+ "white": color.FgWhite,
+
+ // attributes
+ "reset": color.Reset,
+ "bold": color.Bold,
+ "faint": color.Faint,
+ "italic": color.Italic,
+ "underline": color.Underline,
+ "blinkslow": color.BlinkSlow,
+ "blinkrapid": color.BlinkRapid,
+ "reversevideo": color.ReverseVideo,
+ "concealed": color.Concealed,
+ "crossedout": color.CrossedOut,
+
+ // foreground text colors
+ "fgBlack": color.FgBlack,
+ "fgRed": color.FgRed,
+ "fgGreen": color.FgGreen,
+ "fgYellow": color.FgYellow,
+ "fgBlue": color.FgBlue,
+ "fgMagenta": color.FgMagenta,
+ "fgCyan": color.FgCyan,
+ "fgWhite": color.FgWhite,
+
+ // foreground Hi-Intensity text colors
+ "fgHiBlack": color.FgHiBlack,
+ "fgHiRed": color.FgHiRed,
+ "fgHiGreen": color.FgHiGreen,
+ "fgHiYellow": color.FgHiYellow,
+ "fgHiBlue": color.FgHiBlue,
+ "fgHiMagenta": color.FgHiMagenta,
+ "fgHiCyan": color.FgHiCyan,
+ "fgHiWhite": color.FgHiWhite,
+
+ // background text colors
+ "bgBlack": color.BgBlack,
+ "bgRed": color.BgRed,
+ "bgGreen": color.BgGreen,
+ "bgYellow": color.BgYellow,
+ "bgBlue": color.BgBlue,
+ "bgMagenta": color.BgMagenta,
+ "bgCyan": color.BgCyan,
+ "bgWhite": color.BgWhite,
+
+ // background Hi-Intensity text colors
+ "bgHiBlack": color.BgHiBlack,
+ "bgHiRed": color.BgHiRed,
+ "bgHiGreen": color.BgHiGreen,
+ "bgHiYellow": color.BgHiYellow,
+ "bgHiBlue": color.BgHiBlue,
+ "bgHiMagenta": color.BgHiMagenta,
+ "bgHiCyan": color.BgHiCyan,
+ "bgHiWhite": color.BgHiWhite,
+}
+
+// validColor will make sure the given color is actually allowed.
+func validColor(c string) bool {
+ return validColors[c]
+}
+
+// Spinner struct to hold the provided options.
+type Spinner struct {
+ mu *sync.RWMutex
+ Delay time.Duration // Delay is the speed of the indicator
+ chars []string // chars holds the chosen character set
+	Prefix            string                        // Prefix is the text prepended to the indicator
+ Suffix string // Suffix is the text appended to the indicator
+ FinalMSG string // string displayed after Stop() is called
+ lastOutputPlain string // last character(set) written
+ LastOutput string // last character(set) written with colors
+ color func(a ...interface{}) string // default color is white
+ Writer io.Writer // to make testing better, exported so users have access. Use `WithWriter` to update after initialization.
+ WriterFile *os.File // writer as file to allow terminal check
+ active bool // active holds the state of the spinner
+ enabled bool // indicates whether the spinner is enabled or not
+ stopChan chan struct{} // stopChan is a channel used to stop the indicator
+ HideCursor bool // hideCursor determines if the cursor is visible
+ PreUpdate func(s *Spinner) // will be triggered before every spinner update
+ PostUpdate func(s *Spinner) // will be triggered after every spinner update
+}
+
+// New provides a pointer to an instance of Spinner with the supplied options.
+func New(cs []string, d time.Duration, options ...Option) *Spinner {
+ s := &Spinner{
+ Delay: d,
+ chars: cs,
+ color: color.New(color.FgWhite).SprintFunc(),
+ mu: &sync.RWMutex{},
+ Writer: color.Output,
+ WriterFile: os.Stdout, // matches color.Output
+ stopChan: make(chan struct{}, 1),
+ active: false,
+ enabled: true,
+ HideCursor: true,
+ }
+
+ for _, option := range options {
+ option(s)
+ }
+
+ return s
+}
+
+// Option is a function that takes a spinner and applies
+// a given configuration.
+type Option func(*Spinner)
+
+// Options contains fields to configure the spinner.
+type Options struct {
+ Color string
+ Suffix string
+ FinalMSG string
+ HideCursor bool
+}
+
+// WithColor adds the given color to the spinner.
+func WithColor(color string) Option {
+ return func(s *Spinner) {
+ s.Color(color)
+ }
+}
+
+// WithSuffix adds the given string to the spinner
+// as the suffix.
+func WithSuffix(suffix string) Option {
+ return func(s *Spinner) {
+ s.Suffix = suffix
+ }
+}
+
+// WithFinalMSG adds the given string to the spinner
+// as the final message to be written.
+func WithFinalMSG(finalMsg string) Option {
+ return func(s *Spinner) {
+ s.FinalMSG = finalMsg
+ }
+}
+
+// WithHiddenCursor hides the cursor
+// when hideCursor is set to true.
+func WithHiddenCursor(hideCursor bool) Option {
+ return func(s *Spinner) {
+ s.HideCursor = hideCursor
+ }
+}
+
+// WithWriter adds the given writer to the spinner. This
+// function should be favored over directly assigning to
+// the struct value. It assumes the writer is not a terminal, since that
+// cannot be determined from an io.Writer; use WithWriterFile to support
+// terminal checks.
+func WithWriter(w io.Writer) Option {
+ return func(s *Spinner) {
+ s.mu.Lock()
+ s.Writer = w
+ s.WriterFile = os.Stdout // emulate previous behavior for terminal check
+ s.mu.Unlock()
+ }
+}
+
+// WithWriterFile adds the given writer to the spinner. This
+// function should be favored over directly assigning to
+// the struct value. Unlike WithWriter, this function makes it possible to
+// check whether output is going to a terminal (spinning enabled) or not
+// (spinning disabled). It supersedes WithWriter().
+func WithWriterFile(f *os.File) Option {
+ return func(s *Spinner) {
+ s.mu.Lock()
+ s.Writer = f // io.Writer for actual writing
+ s.WriterFile = f // file used only for terminal check
+ s.mu.Unlock()
+ }
+}
+
+// Active will return whether or not the spinner is currently active.
+func (s *Spinner) Active() bool {
+ return s.active
+}
+
+// Enabled returns whether or not the spinner is enabled.
+func (s *Spinner) Enabled() bool {
+ return s.enabled
+}
+
+// Enable enables and restarts the spinner
+func (s *Spinner) Enable() {
+ s.enabled = true
+ s.Restart()
+}
+
+// Disable stops and disables the spinner
+func (s *Spinner) Disable() {
+ s.enabled = false
+ s.Stop()
+}
+
+// Start will start the indicator.
+func (s *Spinner) Start() {
+ s.mu.Lock()
+ if s.active || !s.enabled || !isRunningInTerminal(s) {
+ s.mu.Unlock()
+ return
+ }
+ if s.HideCursor && !isWindowsTerminalOnWindows {
+ // hides the cursor
+ fmt.Fprint(s.Writer, "\033[?25l")
+ }
+	// Disable colors for plain Windows CMD or PowerShell,
+	// as they cannot recognize them.
+ if isWindows && !isWindowsTerminalOnWindows {
+ color.NoColor = true
+ }
+
+ s.active = true
+ s.mu.Unlock()
+
+ go func() {
+ for {
+ for i := 0; i < len(s.chars); i++ {
+ select {
+ case <-s.stopChan:
+ return
+ default:
+ s.mu.Lock()
+ if !s.active {
+ s.mu.Unlock()
+ return
+ }
+ if !isWindowsTerminalOnWindows {
+ s.erase()
+ }
+
+ if s.PreUpdate != nil {
+ s.PreUpdate(s)
+ }
+
+ var outColor string
+ if isWindows {
+ if s.Writer == os.Stderr {
+ outColor = fmt.Sprintf("\r%s%s%s", s.Prefix, s.chars[i], s.Suffix)
+ } else {
+ outColor = fmt.Sprintf("\r%s%s%s", s.Prefix, s.color(s.chars[i]), s.Suffix)
+ }
+ } else {
+ outColor = fmt.Sprintf("\r%s%s%s", s.Prefix, s.color(s.chars[i]), s.Suffix)
+ }
+ outPlain := fmt.Sprintf("\r%s%s%s", s.Prefix, s.chars[i], s.Suffix)
+ fmt.Fprint(s.Writer, outColor)
+ s.lastOutputPlain = outPlain
+ s.LastOutput = outColor
+ delay := s.Delay
+
+ if s.PostUpdate != nil {
+ s.PostUpdate(s)
+ }
+
+ s.mu.Unlock()
+ time.Sleep(delay)
+ }
+ }
+ }
+ }()
+}
+
+// Stop stops the indicator.
+func (s *Spinner) Stop() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.active {
+ s.active = false
+ if s.HideCursor && !isWindowsTerminalOnWindows {
+ // makes the cursor visible
+ fmt.Fprint(s.Writer, "\033[?25h")
+ }
+ s.erase()
+ if s.FinalMSG != "" {
+ if isWindowsTerminalOnWindows {
+ fmt.Fprint(s.Writer, "\r", s.FinalMSG)
+ } else {
+ fmt.Fprint(s.Writer, s.FinalMSG)
+ }
+ }
+ s.stopChan <- struct{}{}
+ }
+}
+
+// Restart will stop and start the indicator.
+func (s *Spinner) Restart() {
+ s.Stop()
+ s.Start()
+}
+
+// Reverse will reverse the order of the slice assigned to the indicator.
+func (s *Spinner) Reverse() {
+ s.mu.Lock()
+ for i, j := 0, len(s.chars)-1; i < j; i, j = i+1, j-1 {
+ s.chars[i], s.chars[j] = s.chars[j], s.chars[i]
+ }
+ s.mu.Unlock()
+}
+
+// Color will set the struct field for the given color to be used. The spinner
+// will need to be explicitly restarted.
+func (s *Spinner) Color(colors ...string) error {
+ colorAttributes := make([]color.Attribute, len(colors))
+
+ // Verify colours are valid and place the appropriate attribute in the array
+ for index, c := range colors {
+ if !validColor(c) {
+ return errInvalidColor
+ }
+ colorAttributes[index] = colorAttributeMap[c]
+ }
+
+ s.mu.Lock()
+ s.color = color.New(colorAttributes...).SprintFunc()
+ s.mu.Unlock()
+ return nil
+}
+
+// UpdateSpeed will set the indicator delay to the given value.
+func (s *Spinner) UpdateSpeed(d time.Duration) {
+ s.mu.Lock()
+ s.Delay = d
+ s.mu.Unlock()
+}
+
+// UpdateCharSet will change the current character set to the given one.
+func (s *Spinner) UpdateCharSet(cs []string) {
+ s.mu.Lock()
+ s.chars = cs
+ s.mu.Unlock()
+}
+
+// erase deletes written characters on the current line.
+// Caller must already hold s.mu.
+func (s *Spinner) erase() {
+ n := utf8.RuneCountInString(s.lastOutputPlain)
+ if runtime.GOOS == "windows" && !isWindowsTerminalOnWindows {
+ clearString := "\r" + strings.Repeat(" ", n) + "\r"
+ fmt.Fprint(s.Writer, clearString)
+ s.lastOutputPlain = ""
+ return
+ }
+
+ numberOfLinesToErase := computeNumberOfLinesNeededToPrintString(s.lastOutputPlain)
+
+ // Taken from https://en.wikipedia.org/wiki/ANSI_escape_code:
+ // \r - Carriage return - Moves the cursor to column zero
+ // \033[K - Erases part of the line. If n is 0 (or missing), clear from
+ // cursor to the end of the line. If n is 1, clear from cursor to beginning
+ // of the line. If n is 2, clear entire line. Cursor position does not
+ // change.
+ // \033[F - Go to the beginning of previous line
+ eraseCodeString := strings.Builder{}
+ // current position is at the end of the last printed line. Start by erasing current line
+ eraseCodeString.WriteString("\r\033[K") // start by erasing current line
+ for i := 1; i < numberOfLinesToErase; i++ {
+ // For each additional lines, go up one line and erase it.
+ eraseCodeString.WriteString("\033[F\033[K")
+ }
+	fmt.Fprint(s.Writer, eraseCodeString.String())
+ s.lastOutputPlain = ""
+}
+
+// Lock allows for manual control to lock the spinner.
+func (s *Spinner) Lock() {
+ s.mu.Lock()
+}
+
+// Unlock allows for manual control to unlock the spinner.
+func (s *Spinner) Unlock() {
+ s.mu.Unlock()
+}
+
+// GenerateNumberSequence generates a slice of the provided length whose
+// elements are the integers 0 through length-1 converted to strings.
+func GenerateNumberSequence(length int) []string {
+ numSeq := make([]string, length)
+ for i := 0; i < length; i++ {
+ numSeq[i] = strconv.Itoa(i)
+ }
+ return numSeq
+}
+
+// isRunningInTerminal checks whether the writer file descriptor is a terminal
+func isRunningInTerminal(s *Spinner) bool {
+ return isatty.IsTerminal(s.WriterFile.Fd())
+}
+
+func computeNumberOfLinesNeededToPrintString(linePrinted string) int {
+ terminalWidth := math.MaxInt // assume infinity by default to keep behaviour consistent with what we had before
+ if term.IsTerminal(0) {
+ if width, _, err := term.GetSize(0); err == nil {
+ terminalWidth = width
+ }
+ }
+ return computeNumberOfLinesNeededToPrintStringInternal(linePrinted, terminalWidth)
+}
+
+// isAnsiMarker reports whether a rune denotes the start of an ANSI escape sequence
+func isAnsiMarker(r rune) bool {
+ return r == '\x1b'
+}
+
+// isAnsiTerminator reports whether a rune denotes the end of an ANSI escape sequence
+func isAnsiTerminator(r rune) bool {
+ return (r >= 0x40 && r <= 0x5a) || (r == 0x5e) || (r >= 0x60 && r <= 0x7e)
+}
+
+// computeLineWidth returns the displayed width of a line
+func computeLineWidth(line string) int {
+ width := 0
+ ansi := false
+
+ for _, r := range []rune(line) {
+ // increase width only when outside of ANSI escape sequences
+ if ansi || isAnsiMarker(r) {
+ ansi = !isAnsiTerminator(r)
+ } else {
+ width += utf8.RuneLen(r)
+ }
+ }
+
+ return width
+}
+
+func computeNumberOfLinesNeededToPrintStringInternal(linePrinted string, maxLineWidth int) int {
+ lineCount := 0
+ for _, line := range strings.Split(linePrinted, "\n") {
+ lineCount += 1
+
+ lineWidth := computeLineWidth(line)
+ if lineWidth > maxLineWidth {
+ lineCount += int(float64(lineWidth) / float64(maxLineWidth))
+ }
+ }
+
+ return lineCount
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go
index 4da2bd3633..7d3e1536b3 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: udpa/annotations/migrate.proto
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go
index 1b72b067f6..38196d5eb0 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,21 +32,57 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on MigrateAnnotation with the rules defined
-// in the proto definition for this message. If any rules are violated, an
-// error is returned.
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
func (m *MigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MigrateAnnotationMultiError, or nil if none found.
+func (m *MigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MigrateAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for Rename
+ if len(errors) > 0 {
+ return MigrateAnnotationMultiError(errors)
+ }
+
return nil
}
+// MigrateAnnotationMultiError is an error wrapping multiple validation errors
+// returned by MigrateAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type MigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MigrateAnnotationMultiError) AllErrors() []error { return m }
+
// MigrateAnnotationValidationError is the validation error returned by
// MigrateAnnotation.Validate if the designated constraints aren't met.
type MigrateAnnotationValidationError struct {
@@ -104,19 +141,54 @@ var _ interface {
// Validate checks the field values on FieldMigrateAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *FieldMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldMigrateAnnotationMultiError, or nil if none found.
+func (m *FieldMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldMigrateAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for Rename
// no validation rules for OneofPromotion
+ if len(errors) > 0 {
+ return FieldMigrateAnnotationMultiError(errors)
+ }
+
return nil
}
+// FieldMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m }
+
// FieldMigrateAnnotationValidationError is the validation error returned by
// FieldMigrateAnnotation.Validate if the designated constraints aren't met.
type FieldMigrateAnnotationValidationError struct {
@@ -175,17 +247,52 @@ var _ interface {
// Validate checks the field values on FileMigrateAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *FileMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FileMigrateAnnotationMultiError, or nil if none found.
+func (m *FileMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FileMigrateAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for MoveToPackage
+ if len(errors) > 0 {
+ return FileMigrateAnnotationMultiError(errors)
+ }
+
return nil
}
+// FileMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FileMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FileMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FileMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m }
+
// FileMigrateAnnotationValidationError is the validation error returned by
// FileMigrateAnnotation.Validate if the designated constraints aren't met.
type FileMigrateAnnotationValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go
index c06e280aba..7195778953 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: udpa/annotations/security.proto
@@ -121,10 +121,10 @@ var file_udpa_annotations_security_proto_rawDesc = []byte{
0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69,
0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42,
- 0x31, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
- 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61,
- 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02,
- 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x31, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x08, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67,
+ 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go
index 64058ccdd1..acc9bd7a12 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,23 +32,59 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on FieldSecurityAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *FieldSecurityAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldSecurityAnnotation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldSecurityAnnotationMultiError, or nil if none found.
+func (m *FieldSecurityAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldSecurityAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for ConfigureForUntrustedDownstream
// no validation rules for ConfigureForUntrustedUpstream
+ if len(errors) > 0 {
+ return FieldSecurityAnnotationMultiError(errors)
+ }
+
return nil
}
+// FieldSecurityAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldSecurityAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldSecurityAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m }
+
// FieldSecurityAnnotationValidationError is the validation error returned by
// FieldSecurityAnnotation.Validate if the designated constraints aren't met.
type FieldSecurityAnnotationValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go
index f8fc822948..8631b8568c 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: udpa/annotations/sensitive.proto
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go
index dd4fea9b26..f3fa61974c 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,4 +32,5 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go
index ac7238e558..f2fdc3ca38 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: udpa/annotations/status.proto
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go
index 9af17c92f7..5633a83831 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,23 +32,59 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on StatusAnnotation with the rules defined
-// in the proto definition for this message. If any rules are violated, an
-// error is returned.
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
func (m *StatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// StatusAnnotationMultiError, or nil if none found.
+func (m *StatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StatusAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for WorkInProgress
// no validation rules for PackageVersionStatus
+ if len(errors) > 0 {
+ return StatusAnnotationMultiError(errors)
+ }
+
return nil
}
+// StatusAnnotationMultiError is an error wrapping multiple validation errors
+// returned by StatusAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type StatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StatusAnnotationMultiError) AllErrors() []error { return m }
+
// StatusAnnotationValidationError is the validation error returned by
// StatusAnnotation.Validate if the designated constraints aren't met.
type StatusAnnotationValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go
index 68a101a3f7..df83e0a2eb 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: udpa/annotations/versioning.proto
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go
index e88144cc1e..5fd86baffd 100644
--- a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,21 +32,57 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on VersioningAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *VersioningAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// VersioningAnnotationMultiError, or nil if none found.
+func (m *VersioningAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *VersioningAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for PreviousMessageType
+ if len(errors) > 0 {
+ return VersioningAnnotationMultiError(errors)
+ }
+
return nil
}
+// VersioningAnnotationMultiError is an error wrapping multiple validation
+// errors returned by VersioningAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type VersioningAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m VersioningAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m VersioningAnnotationMultiError) AllErrors() []error { return m }
+
// VersioningAnnotationValidationError is the validation error returned by
// VersioningAnnotation.Validate if the designated constraints aren't met.
type VersioningAnnotationValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go
index 0cdd47f757..ad24b1f7f6 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/annotations/v3/migrate.proto
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go
index c74f35897e..d57d778247 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,21 +32,57 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on MigrateAnnotation with the rules defined
-// in the proto definition for this message. If any rules are violated, an
-// error is returned.
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
func (m *MigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MigrateAnnotationMultiError, or nil if none found.
+func (m *MigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MigrateAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for Rename
+ if len(errors) > 0 {
+ return MigrateAnnotationMultiError(errors)
+ }
+
return nil
}
+// MigrateAnnotationMultiError is an error wrapping multiple validation errors
+// returned by MigrateAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type MigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MigrateAnnotationMultiError) AllErrors() []error { return m }
+
// MigrateAnnotationValidationError is the validation error returned by
// MigrateAnnotation.Validate if the designated constraints aren't met.
type MigrateAnnotationValidationError struct {
@@ -104,19 +141,54 @@ var _ interface {
// Validate checks the field values on FieldMigrateAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *FieldMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldMigrateAnnotationMultiError, or nil if none found.
+func (m *FieldMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldMigrateAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for Rename
// no validation rules for OneofPromotion
+ if len(errors) > 0 {
+ return FieldMigrateAnnotationMultiError(errors)
+ }
+
return nil
}
+// FieldMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m }
+
// FieldMigrateAnnotationValidationError is the validation error returned by
// FieldMigrateAnnotation.Validate if the designated constraints aren't met.
type FieldMigrateAnnotationValidationError struct {
@@ -175,17 +247,52 @@ var _ interface {
// Validate checks the field values on FileMigrateAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *FileMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FileMigrateAnnotationMultiError, or nil if none found.
+func (m *FileMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FileMigrateAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for MoveToPackage
+ if len(errors) > 0 {
+ return FileMigrateAnnotationMultiError(errors)
+ }
+
return nil
}
+// FileMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FileMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FileMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FileMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m }
+
// FileMigrateAnnotationValidationError is the validation error returned by
// FileMigrateAnnotation.Validate if the designated constraints aren't met.
type FileMigrateAnnotationValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go
index a50efc41b2..61df6890bd 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/annotations/v3/security.proto
@@ -121,10 +121,10 @@ var file_xds_annotations_v3_security_proto_rawDesc = []byte{
0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72,
0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73,
- 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42, 0x33, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75,
- 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67,
- 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08,
+ 0x01, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go
index 3bee0479fd..ac0143f277 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,23 +32,59 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on FieldSecurityAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *FieldSecurityAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldSecurityAnnotation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldSecurityAnnotationMultiError, or nil if none found.
+func (m *FieldSecurityAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldSecurityAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for ConfigureForUntrustedDownstream
// no validation rules for ConfigureForUntrustedUpstream
+ if len(errors) > 0 {
+ return FieldSecurityAnnotationMultiError(errors)
+ }
+
return nil
}
+// FieldSecurityAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldSecurityAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldSecurityAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m }
+
// FieldSecurityAnnotationValidationError is the validation error returned by
// FieldSecurityAnnotation.Validate if the designated constraints aren't met.
type FieldSecurityAnnotationValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go
index 1fbfafa82d..274eace058 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/annotations/v3/sensitive.proto
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go
index 7f368572a5..c101d3acc4 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,4 +32,5 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go
index 842025bd71..2497e0b2fe 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/annotations/v3/status.proto
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go
index a8ebf097df..a87dbee8d8 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,21 +32,57 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on FileStatusAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *FileStatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FileStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FileStatusAnnotationMultiError, or nil if none found.
+func (m *FileStatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FileStatusAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for WorkInProgress
+ if len(errors) > 0 {
+ return FileStatusAnnotationMultiError(errors)
+ }
+
return nil
}
+// FileStatusAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FileStatusAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FileStatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FileStatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FileStatusAnnotationMultiError) AllErrors() []error { return m }
+
// FileStatusAnnotationValidationError is the validation error returned by
// FileStatusAnnotation.Validate if the designated constraints aren't met.
type FileStatusAnnotationValidationError struct {
@@ -104,17 +141,52 @@ var _ interface {
// Validate checks the field values on MessageStatusAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *MessageStatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MessageStatusAnnotation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MessageStatusAnnotationMultiError, or nil if none found.
+func (m *MessageStatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MessageStatusAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for WorkInProgress
+ if len(errors) > 0 {
+ return MessageStatusAnnotationMultiError(errors)
+ }
+
return nil
}
+// MessageStatusAnnotationMultiError is an error wrapping multiple validation
+// errors returned by MessageStatusAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type MessageStatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MessageStatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MessageStatusAnnotationMultiError) AllErrors() []error { return m }
+
// MessageStatusAnnotationValidationError is the validation error returned by
// MessageStatusAnnotation.Validate if the designated constraints aren't met.
type MessageStatusAnnotationValidationError struct {
@@ -173,17 +245,52 @@ var _ interface {
// Validate checks the field values on FieldStatusAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *FieldStatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldStatusAnnotationMultiError, or nil if none found.
+func (m *FieldStatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldStatusAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for WorkInProgress
+ if len(errors) > 0 {
+ return FieldStatusAnnotationMultiError(errors)
+ }
+
return nil
}
+// FieldStatusAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldStatusAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldStatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldStatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldStatusAnnotationMultiError) AllErrors() []error { return m }
+
// FieldStatusAnnotationValidationError is the validation error returned by
// FieldStatusAnnotation.Validate if the designated constraints aren't met.
type FieldStatusAnnotationValidationError struct {
@@ -241,20 +348,55 @@ var _ interface {
} = FieldStatusAnnotationValidationError{}
// Validate checks the field values on StatusAnnotation with the rules defined
-// in the proto definition for this message. If any rules are violated, an
-// error is returned.
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
func (m *StatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// StatusAnnotationMultiError, or nil if none found.
+func (m *StatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StatusAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for WorkInProgress
// no validation rules for PackageVersionStatus
+ if len(errors) > 0 {
+ return StatusAnnotationMultiError(errors)
+ }
+
return nil
}
+// StatusAnnotationMultiError is an error wrapping multiple validation errors
+// returned by StatusAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type StatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StatusAnnotationMultiError) AllErrors() []error { return m }
+
// StatusAnnotationValidationError is the validation error returned by
// StatusAnnotation.Validate if the designated constraints aren't met.
type StatusAnnotationValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go
index 5412c812a1..2307dc874a 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/annotations/v3/versioning.proto
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go
index 80c53b21cc..042c266e13 100644
--- a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,21 +32,57 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on VersioningAnnotation with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *VersioningAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// VersioningAnnotationMultiError, or nil if none found.
+func (m *VersioningAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *VersioningAnnotation) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for PreviousMessageType
+ if len(errors) > 0 {
+ return VersioningAnnotationMultiError(errors)
+ }
+
return nil
}
+// VersioningAnnotationMultiError is an error wrapping multiple validation
+// errors returned by VersioningAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type VersioningAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m VersioningAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m VersioningAnnotationMultiError) AllErrors() []error { return m }
+
// VersioningAnnotationValidationError is the validation error returned by
// VersioningAnnotation.Validate if the designated constraints aren't met.
type VersioningAnnotationValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go
index 5a22c32665..3c361216c0 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/core/v3/authority.proto
@@ -81,12 +81,12 @@ var file_xds_core_v3_authority_proto_rawDesc = []byte{
0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74,
0x79, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x56,
- 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73,
- 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
- 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f,
- 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6,
- 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
+ 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go
index 06b55362da..94317c2af0 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,25 +32,65 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on Authority with the rules defined in the
-// proto definition for this message. If any rules are violated, an error is returned.
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
func (m *Authority) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Authority with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in AuthorityMultiError, or nil
+// if none found.
+func (m *Authority) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Authority) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
if utf8.RuneCountInString(m.GetName()) < 1 {
- return AuthorityValidationError{
+ err := AuthorityValidationError{
field: "Name",
reason: "value length must be at least 1 runes",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return AuthorityMultiError(errors)
}
return nil
}
+// AuthorityMultiError is an error wrapping multiple validation errors returned
+// by Authority.ValidateAll() if the designated constraints aren't met.
+type AuthorityMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AuthorityMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AuthorityMultiError) AllErrors() []error { return m }
+
// AuthorityValidationError is the validation error returned by
// Authority.Validate if the designated constraints aren't met.
type AuthorityValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go
index e915cdb9d2..60587a2fa9 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/core/v3/cidr.proto
@@ -97,12 +97,12 @@ var file_xds_core_v3_cidr_proto_rawDesc = []byte{
0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69,
- 0x78, 0x4c, 0x65, 0x6e, 0x42, 0x56, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e,
- 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
- 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72,
- 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x78, 0x4c, 0x65, 0x6e, 0x42, 0x56, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16,
+ 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go
index eb48b32ba2..43327f56b5 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,36 +32,80 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on CidrRange with the rules defined in the
-// proto definition for this message. If any rules are violated, an error is returned.
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
func (m *CidrRange) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CidrRange with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CidrRangeMultiError, or nil
+// if none found.
+func (m *CidrRange) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CidrRange) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 {
- return CidrRangeValidationError{
+ err := CidrRangeValidationError{
field: "AddressPrefix",
reason: "value length must be at least 1 runes",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
if wrapper := m.GetPrefixLen(); wrapper != nil {
if wrapper.GetValue() > 128 {
- return CidrRangeValidationError{
+ err := CidrRangeValidationError{
field: "PrefixLen",
reason: "value must be less than or equal to 128",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
}
+ if len(errors) > 0 {
+ return CidrRangeMultiError(errors)
+ }
+
return nil
}
+// CidrRangeMultiError is an error wrapping multiple validation errors returned
+// by CidrRange.ValidateAll() if the designated constraints aren't met.
+type CidrRangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CidrRangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CidrRangeMultiError) AllErrors() []error { return m }
+
// CidrRangeValidationError is the validation error returned by
// CidrRange.Validate if the designated constraints aren't met.
type CidrRangeValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go
index e91c6abe7f..63e33eeb81 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/core/v3/collection_entry.proto
@@ -201,12 +201,12 @@ var file_xds_core_v3_collection_entry_proto_rawDesc = []byte{
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x19, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01,
- 0x42, 0x5c, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78,
- 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x43, 0x6f, 0x6c, 0x6c,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
- 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63,
- 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06,
+ 0x42, 0x5c, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x42, 0x14, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go
index a812625302..610990b7fe 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,21 +32,66 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on CollectionEntry with the rules defined
-// in the proto definition for this message. If any rules are violated, an
-// error is returned.
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
func (m *CollectionEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CollectionEntry with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CollectionEntryMultiError, or nil if none found.
+func (m *CollectionEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CollectionEntry) validate(all bool) error {
if m == nil {
return nil
}
- switch m.ResourceSpecifier.(type) {
+ var errors []error
+ oneofResourceSpecifierPresent := false
+ switch v := m.ResourceSpecifier.(type) {
case *CollectionEntry_Locator:
-
- if v, ok := interface{}(m.GetLocator()).(interface{ Validate() error }); ok {
+ if v == nil {
+ err := CollectionEntryValidationError{
+ field: "ResourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofResourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetLocator()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "Locator",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "Locator",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocator()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return CollectionEntryValidationError{
field: "Locator",
@@ -56,8 +102,38 @@ func (m *CollectionEntry) Validate() error {
}
case *CollectionEntry_InlineEntry_:
-
- if v, ok := interface{}(m.GetInlineEntry()).(interface{ Validate() error }); ok {
+ if v == nil {
+ err := CollectionEntryValidationError{
+ field: "ResourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofResourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetInlineEntry()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "InlineEntry",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "InlineEntry",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetInlineEntry()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return CollectionEntryValidationError{
field: "InlineEntry",
@@ -68,16 +144,43 @@ func (m *CollectionEntry) Validate() error {
}
default:
- return CollectionEntryValidationError{
+ _ = v // ensures v is used
+ }
+ if !oneofResourceSpecifierPresent {
+ err := CollectionEntryValidationError{
field: "ResourceSpecifier",
reason: "value is required",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ if len(errors) > 0 {
+ return CollectionEntryMultiError(errors)
}
return nil
}
+// CollectionEntryMultiError is an error wrapping multiple validation errors
+// returned by CollectionEntry.ValidateAll() if the designated constraints
+// aren't met.
+type CollectionEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CollectionEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CollectionEntryMultiError) AllErrors() []error { return m }
+
// CollectionEntryValidationError is the validation error returned by
// CollectionEntry.Validate if the designated constraints aren't met.
type CollectionEntryValidationError struct {
@@ -134,22 +237,59 @@ var _ interface {
// Validate checks the field values on CollectionEntry_InlineEntry with the
// rules defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *CollectionEntry_InlineEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CollectionEntry_InlineEntry with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CollectionEntry_InlineEntryMultiError, or nil if none found.
+func (m *CollectionEntry_InlineEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CollectionEntry_InlineEntry) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
if !_CollectionEntry_InlineEntry_Name_Pattern.MatchString(m.GetName()) {
- return CollectionEntry_InlineEntryValidationError{
+ err := CollectionEntry_InlineEntryValidationError{
field: "Name",
reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\.~:]+$\"",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
// no validation rules for Version
- if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if all {
+ switch v := interface{}(m.GetResource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CollectionEntry_InlineEntryValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CollectionEntry_InlineEntryValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return CollectionEntry_InlineEntryValidationError{
field: "Resource",
@@ -159,9 +299,30 @@ func (m *CollectionEntry_InlineEntry) Validate() error {
}
}
+ if len(errors) > 0 {
+ return CollectionEntry_InlineEntryMultiError(errors)
+ }
+
return nil
}
+// CollectionEntry_InlineEntryMultiError is an error wrapping multiple
+// validation errors returned by CollectionEntry_InlineEntry.ValidateAll() if
+// the designated constraints aren't met.
+type CollectionEntry_InlineEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CollectionEntry_InlineEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CollectionEntry_InlineEntryMultiError) AllErrors() []error { return m }
+
// CollectionEntry_InlineEntryValidationError is the validation error returned
// by CollectionEntry_InlineEntry.Validate if the designated constraints
// aren't met.
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go
index f3f37162b9..563775a1fb 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/core/v3/context_params.proto
@@ -84,13 +84,13 @@ var file_xds_core_v3_context_params_proto_rawDesc = []byte{
0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5a, 0x0a,
- 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e,
- 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74,
- 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78,
- 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
- 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5a, 0xd2,
+ 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42,
+ 0x12, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64,
+ 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go
index 31277a6284..1c9accaa3a 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,21 +32,57 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on ContextParams with the rules defined in
-// the proto definition for this message. If any rules are violated, an error
-// is returned.
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
func (m *ContextParams) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ContextParams with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ContextParamsMultiError, or
+// nil if none found.
+func (m *ContextParams) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ContextParams) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for Params
+ if len(errors) > 0 {
+ return ContextParamsMultiError(errors)
+ }
+
return nil
}
+// ContextParamsMultiError is an error wrapping multiple validation errors
+// returned by ContextParams.ValidateAll() if the designated constraints
+// aren't met.
+type ContextParamsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ContextParamsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ContextParamsMultiError) AllErrors() []error { return m }
+
// ContextParamsValidationError is the validation error returned by
// ContextParams.Validate if the designated constraints aren't met.
type ContextParamsValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go
index 41db466bd9..35845e0187 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/core/v3/extension.proto
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go
index 2acbda3c6f..839f3fef79 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,37 +32,81 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on TypedExtensionConfig with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *TypedExtensionConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TypedExtensionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// TypedExtensionConfigMultiError, or nil if none found.
+func (m *TypedExtensionConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TypedExtensionConfig) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
if utf8.RuneCountInString(m.GetName()) < 1 {
- return TypedExtensionConfigValidationError{
+ err := TypedExtensionConfigValidationError{
field: "Name",
reason: "value length must be at least 1 runes",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
if m.GetTypedConfig() == nil {
- return TypedExtensionConfigValidationError{
+ err := TypedExtensionConfigValidationError{
field: "TypedConfig",
reason: "value is required",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
if a := m.GetTypedConfig(); a != nil {
}
+ if len(errors) > 0 {
+ return TypedExtensionConfigMultiError(errors)
+ }
+
return nil
}
+// TypedExtensionConfigMultiError is an error wrapping multiple validation
+// errors returned by TypedExtensionConfig.ValidateAll() if the designated
+// constraints aren't met.
+type TypedExtensionConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TypedExtensionConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TypedExtensionConfigMultiError) AllErrors() []error { return m }
+
// TypedExtensionConfigValidationError is the validation error returned by
// TypedExtensionConfig.Validate if the designated constraints aren't met.
type TypedExtensionConfigValidationError struct {
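
The generated pair above gives callers a fail-fast path (Validate) and an exhaustive one (ValidateAll). A minimal usage sketch, not part of this diff — the import alias and the zero-value message are illustrative assumptions:

```go
package main

import (
	"fmt"

	xdscorev3 "github.com/cncf/xds/go/xds/core/v3"
)

func main() {
	// An empty message breaks two rules at once: Name must be at least
	// one rune long and TypedConfig is required.
	cfg := &xdscorev3.TypedExtensionConfig{}

	// Validate is fail-fast and reports only the first violation.
	fmt.Println("first violation:", cfg.Validate())

	// ValidateAll keeps going and wraps every violation in a MultiError.
	if err := cfg.ValidateAll(); err != nil {
		if multi, ok := err.(xdscorev3.TypedExtensionConfigMultiError); ok {
			for _, violation := range multi.AllErrors() {
				fmt.Println("violation:", violation)
			}
		}
	}
}
```
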
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go
index 3b4c853dc9..4cf1f7e7e7 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/core/v3/resource.proto
@@ -105,12 +105,12 @@ var file_xds_core_v3_resource_proto_rawDesc = []byte{
0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x55,
- 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73,
- 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75,
- 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67,
- 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4,
- 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go
index 4e49352cc3..dc972171c9 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,16 +32,51 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on Resource with the rules defined in the
-// proto definition for this message. If any rules are violated, an error is returned.
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
func (m *Resource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Resource with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ResourceMultiError, or nil
+// if none found.
+func (m *Resource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Resource) validate(all bool) error {
if m == nil {
return nil
}
- if v, ok := interface{}(m.GetName()).(interface{ Validate() error }); ok {
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetName()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Name",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Name",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetName()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ResourceValidationError{
field: "Name",
@@ -52,7 +88,26 @@ func (m *Resource) Validate() error {
// no validation rules for Version
- if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if all {
+ switch v := interface{}(m.GetResource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ResourceValidationError{
field: "Resource",
@@ -62,9 +117,29 @@ func (m *Resource) Validate() error {
}
}
+ if len(errors) > 0 {
+ return ResourceMultiError(errors)
+ }
+
return nil
}
+// ResourceMultiError is an error wrapping multiple validation errors returned
+// by Resource.ValidateAll() if the designated constraints aren't met.
+type ResourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceMultiError) AllErrors() []error { return m }
+
// ResourceValidationError is the validation error returned by
// Resource.Validate if the designated constraints aren't met.
type ResourceValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go
index 8123f1140f..50fe599dbf 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/core/v3/resource_locator.proto
@@ -304,12 +304,12 @@ var file_xds_core_v3_resource_locator_proto_rawDesc = []byte{
0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x49, 0x4c,
0x45, 0x10, 0x02, 0x42, 0x19, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70,
0x61, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x5c,
- 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73,
- 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
- 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72,
- 0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go
index ff91eecd7c..1686e98d12 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,21 +32,40 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on ResourceLocator with the rules defined
-// in the proto definition for this message. If any rules are violated, an
-// error is returned.
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
func (m *ResourceLocator) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceLocator with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceLocatorMultiError, or nil if none found.
+func (m *ResourceLocator) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceLocator) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
if _, ok := ResourceLocator_Scheme_name[int32(m.GetScheme())]; !ok {
- return ResourceLocatorValidationError{
+ err := ResourceLocatorValidationError{
field: "Scheme",
reason: "value must be one of the defined enum values",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
// no validation rules for Id
@@ -53,16 +73,39 @@ func (m *ResourceLocator) Validate() error {
// no validation rules for Authority
if utf8.RuneCountInString(m.GetResourceType()) < 1 {
- return ResourceLocatorValidationError{
+ err := ResourceLocatorValidationError{
field: "ResourceType",
reason: "value length must be at least 1 runes",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
for idx, item := range m.GetDirectives() {
_, _ = idx, item
- if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: fmt.Sprintf("Directives[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: fmt.Sprintf("Directives[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ResourceLocatorValidationError{
field: fmt.Sprintf("Directives[%v]", idx),
@@ -74,11 +117,39 @@ func (m *ResourceLocator) Validate() error {
}
- switch m.ContextParamSpecifier.(type) {
-
+ switch v := m.ContextParamSpecifier.(type) {
case *ResourceLocator_ExactContext:
+ if v == nil {
+ err := ResourceLocatorValidationError{
+ field: "ContextParamSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
- if v, ok := interface{}(m.GetExactContext()).(interface{ Validate() error }); ok {
+ if all {
+ switch v := interface{}(m.GetExactContext()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: "ExactContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: "ExactContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExactContext()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ResourceLocatorValidationError{
field: "ExactContext",
@@ -88,11 +159,34 @@ func (m *ResourceLocator) Validate() error {
}
}
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return ResourceLocatorMultiError(errors)
}
return nil
}
+// ResourceLocatorMultiError is an error wrapping multiple validation errors
+// returned by ResourceLocator.ValidateAll() if the designated constraints
+// aren't met.
+type ResourceLocatorMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceLocatorMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceLocatorMultiError) AllErrors() []error { return m }
+
// ResourceLocatorValidationError is the validation error returned by
// ResourceLocator.Validate if the designated constraints aren't met.
type ResourceLocatorValidationError struct {
@@ -149,17 +243,61 @@ var _ interface {
// Validate checks the field values on ResourceLocator_Directive with the rules
// defined in the proto definition for this message. If any rules are
-// violated, an error is returned.
+// violated, the first error encountered is returned, or nil if there are no violations.
func (m *ResourceLocator_Directive) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceLocator_Directive with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceLocator_DirectiveMultiError, or nil if none found.
+func (m *ResourceLocator_Directive) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceLocator_Directive) validate(all bool) error {
if m == nil {
return nil
}
- switch m.Directive.(type) {
+ var errors []error
+ oneofDirectivePresent := false
+ switch v := m.Directive.(type) {
case *ResourceLocator_Directive_Alt:
-
- if v, ok := interface{}(m.GetAlt()).(interface{ Validate() error }); ok {
+ if v == nil {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Directive",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofDirectivePresent = true
+
+ if all {
+ switch v := interface{}(m.GetAlt()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceLocator_DirectiveValidationError{
+ field: "Alt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceLocator_DirectiveValidationError{
+ field: "Alt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAlt()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ResourceLocator_DirectiveValidationError{
field: "Alt",
@@ -170,32 +308,78 @@ func (m *ResourceLocator_Directive) Validate() error {
}
case *ResourceLocator_Directive_Entry:
+ if v == nil {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Directive",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofDirectivePresent = true
if utf8.RuneCountInString(m.GetEntry()) < 1 {
- return ResourceLocator_DirectiveValidationError{
+ err := ResourceLocator_DirectiveValidationError{
field: "Entry",
reason: "value length must be at least 1 runes",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
if !_ResourceLocator_Directive_Entry_Pattern.MatchString(m.GetEntry()) {
- return ResourceLocator_DirectiveValidationError{
+ err := ResourceLocator_DirectiveValidationError{
field: "Entry",
reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\./~:]+$\"",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
default:
- return ResourceLocator_DirectiveValidationError{
+ _ = v // ensures v is used
+ }
+ if !oneofDirectivePresent {
+ err := ResourceLocator_DirectiveValidationError{
field: "Directive",
reason: "value is required",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ if len(errors) > 0 {
+ return ResourceLocator_DirectiveMultiError(errors)
}
return nil
}
+// ResourceLocator_DirectiveMultiError is an error wrapping multiple validation
+// errors returned by ResourceLocator_Directive.ValidateAll() if the
+// designated constraints aren't met.
+type ResourceLocator_DirectiveMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceLocator_DirectiveMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceLocator_DirectiveMultiError) AllErrors() []error { return m }
+
// ResourceLocator_DirectiveValidationError is the validation error returned by
// ResourceLocator_Directive.Validate if the designated constraints aren't met.
type ResourceLocator_DirectiveValidationError struct {
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go
index 19e67f6ac6..92d5fa8539 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.32.0
// protoc v3.21.5
// source: xds/core/v3/resource_name.proto
@@ -114,13 +114,13 @@ var file_xds_core_v3_resource_name_proto_rawDesc = []byte{
0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65,
0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63,
0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x59, 0x0a,
- 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e,
- 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69,
- 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64,
- 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33,
- 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x59, 0xd2,
+ 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42,
+ 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73,
+ 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go
index db525b9780..270e921bc3 100644
--- a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go
@@ -11,6 +11,7 @@ import (
"net/mail"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -31,28 +32,66 @@ var (
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
+ _ = sort.Sort
)
// Validate checks the field values on ResourceName with the rules defined in
-// the proto definition for this message. If any rules are violated, an error
-// is returned.
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
func (m *ResourceName) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceName with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ResourceNameMultiError, or
+// nil if none found.
+func (m *ResourceName) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceName) validate(all bool) error {
if m == nil {
return nil
}
+ var errors []error
+
// no validation rules for Id
// no validation rules for Authority
if utf8.RuneCountInString(m.GetResourceType()) < 1 {
- return ResourceNameValidationError{
+ err := ResourceNameValidationError{
field: "ResourceType",
reason: "value length must be at least 1 runes",
}
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
}
- if v, ok := interface{}(m.GetContext()).(interface{ Validate() error }); ok {
+ if all {
+ switch v := interface{}(m.GetContext()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceNameValidationError{
+ field: "Context",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceNameValidationError{
+ field: "Context",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetContext()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ResourceNameValidationError{
field: "Context",
@@ -62,9 +101,29 @@ func (m *ResourceName) Validate() error {
}
}
+ if len(errors) > 0 {
+ return ResourceNameMultiError(errors)
+ }
+
return nil
}
+// ResourceNameMultiError is an error wrapping multiple validation errors
+// returned by ResourceName.ValidateAll() if the designated constraints aren't met.
+type ResourceNameMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceNameMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceNameMultiError) AllErrors() []error { return m }
+
// ResourceNameValidationError is the validation error returned by
// ResourceName.Validate if the designated constraints aren't met.
type ResourceNameValidationError struct {
diff --git a/vendor/github.com/expr-lang/expr/README.md b/vendor/github.com/expr-lang/expr/README.md
index bd34c7d248..6c56c67b67 100644
--- a/vendor/github.com/expr-lang/expr/README.md
+++ b/vendor/github.com/expr-lang/expr/README.md
@@ -162,6 +162,9 @@ func main() {
* [Visually.io](https://visually.io) employs Expr as a business rule engine for its personalization targeting algorithm.
* [Akvorado](https://github.com/akvorado/akvorado) utilizes Expr to classify exporters and interfaces in network flows.
* [keda.sh](https://keda.sh) uses Expr to allow customization of its Kubernetes-based event-driven autoscaling.
+* [Span Digital](https://spandigital.com/) uses Expr in its Knowledge Management products.
+* [Xiaohongshu](https://www.xiaohongshu.com/) combines YAML with Expr for dynamic policy delivery.
+* [Melrōse](https://melrōse.org) uses Expr to implement its music programming language.
[Add your company too](https://github.com/expr-lang/expr/edit/master/README.md)
diff --git a/vendor/github.com/expr-lang/expr/ast/print.go b/vendor/github.com/expr-lang/expr/ast/print.go
index 063e9eb277..6a7d698a99 100644
--- a/vendor/github.com/expr-lang/expr/ast/print.go
+++ b/vendor/github.com/expr-lang/expr/ast/print.go
@@ -65,8 +65,7 @@ func (n *BinaryNode) String() string {
var lhs, rhs string
var lwrap, rwrap bool
- lb, ok := n.Left.(*BinaryNode)
- if ok {
+ if lb, ok := n.Left.(*BinaryNode); ok {
if operator.Less(lb.Operator, n.Operator) {
lwrap = true
}
@@ -77,9 +76,7 @@ func (n *BinaryNode) String() string {
lwrap = true
}
}
-
- rb, ok := n.Right.(*BinaryNode)
- if ok {
+ if rb, ok := n.Right.(*BinaryNode); ok {
if operator.Less(rb.Operator, n.Operator) {
rwrap = true
}
@@ -88,6 +85,13 @@ func (n *BinaryNode) String() string {
}
}
+ if _, ok := n.Left.(*ConditionalNode); ok {
+ lwrap = true
+ }
+ if _, ok := n.Right.(*ConditionalNode); ok {
+ rwrap = true
+ }
+
if lwrap {
lhs = fmt.Sprintf("(%s)", n.Left.String())
} else {
@@ -108,20 +112,25 @@ func (n *ChainNode) String() string {
}
func (n *MemberNode) String() string {
+ node := n.Node.String()
+ if _, ok := n.Node.(*BinaryNode); ok {
+ node = fmt.Sprintf("(%s)", node)
+ }
+
if n.Optional {
if str, ok := n.Property.(*StringNode); ok && utils.IsValidIdentifier(str.Value) {
- return fmt.Sprintf("%s?.%s", n.Node.String(), str.Value)
+ return fmt.Sprintf("%s?.%s", node, str.Value)
} else {
- return fmt.Sprintf("%s?.[%s]", n.Node.String(), n.Property.String())
+ return fmt.Sprintf("%s?.[%s]", node, n.Property.String())
}
}
if str, ok := n.Property.(*StringNode); ok && utils.IsValidIdentifier(str.Value) {
if _, ok := n.Node.(*PointerNode); ok {
return fmt.Sprintf(".%s", str.Value)
}
- return fmt.Sprintf("%s.%s", n.Node.String(), str.Value)
+ return fmt.Sprintf("%s.%s", node, str.Value)
}
- return fmt.Sprintf("%s[%s]", n.Node.String(), n.Property.String())
+ return fmt.Sprintf("%s[%s]", node, n.Property.String())
}
func (n *SliceNode) String() string {
@@ -206,7 +215,7 @@ func (n *PairNode) String() string {
if utils.IsValidIdentifier(str.Value) {
return fmt.Sprintf("%s: %s", str.Value, n.Value.String())
}
- return fmt.Sprintf("%q: %s", str.String(), n.Value.String())
+ return fmt.Sprintf("%s: %s", str.String(), n.Value.String())
}
return fmt.Sprintf("(%s): %s", n.Key.String(), n.Value.String())
}
diff --git a/vendor/github.com/expr-lang/expr/ast/visitor.go b/vendor/github.com/expr-lang/expr/ast/visitor.go
index 287a755896..90bc9f1d0e 100644
--- a/vendor/github.com/expr-lang/expr/ast/visitor.go
+++ b/vendor/github.com/expr-lang/expr/ast/visitor.go
@@ -7,6 +7,9 @@ type Visitor interface {
}
func Walk(node *Node, v Visitor) {
+ if *node == nil {
+ return
+ }
switch n := (*node).(type) {
case *NilNode:
case *IdentifierNode:
diff --git a/vendor/github.com/expr-lang/expr/builtin/builtin.go b/vendor/github.com/expr-lang/expr/builtin/builtin.go
index 7bf377df22..cc6f197cdf 100644
--- a/vendor/github.com/expr-lang/expr/builtin/builtin.go
+++ b/vendor/github.com/expr-lang/expr/builtin/builtin.go
@@ -83,6 +83,11 @@ var Builtins = []*Function{
Predicate: true,
Types: types(new(func([]any, func(any) bool) int)),
},
+ {
+ Name: "sum",
+ Predicate: true,
+ Types: types(new(func([]any, func(any) bool) int)),
+ },
{
Name: "groupBy",
Predicate: true,
@@ -387,13 +392,6 @@ var Builtins = []*Function{
return validateAggregateFunc("min", args)
},
},
- {
- Name: "sum",
- Func: sum,
- Validate: func(args []reflect.Type) (reflect.Type, error) {
- return validateAggregateFunc("sum", args)
- },
- },
{
Name: "mean",
Func: func(args ...any) (any, error) {
@@ -474,9 +472,27 @@ var Builtins = []*Function{
{
Name: "now",
Func: func(args ...any) (any, error) {
- return time.Now(), nil
+ if len(args) == 0 {
+ return time.Now(), nil
+ }
+ if len(args) == 1 {
+ if tz, ok := args[0].(*time.Location); ok {
+ return time.Now().In(tz), nil
+ }
+ }
+ return nil, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args))
+ },
+ Validate: func(args []reflect.Type) (reflect.Type, error) {
+ if len(args) == 0 {
+ return timeType, nil
+ }
+ if len(args) == 1 {
+ if args[0] != nil && args[0].AssignableTo(locationType) {
+ return timeType, nil
+ }
+ }
+ return anyType, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args))
},
- Types: types(new(func() time.Time)),
},
{
Name: "duration",
@@ -488,9 +504,17 @@ var Builtins = []*Function{
{
Name: "date",
Func: func(args ...any) (any, error) {
+ tz, ok := args[0].(*time.Location)
+ if ok {
+ args = args[1:]
+ }
+
date := args[0].(string)
if len(args) == 2 {
layout := args[1].(string)
+ if tz != nil {
+ return time.ParseInLocation(layout, date, tz)
+ }
return time.Parse(layout, date)
}
if len(args) == 3 {
@@ -517,18 +541,43 @@ var Builtins = []*Function{
time.RFC1123,
}
for _, layout := range layouts {
- t, err := time.Parse(layout, date)
- if err == nil {
- return t, nil
+ if tz == nil {
+ t, err := time.Parse(layout, date)
+ if err == nil {
+ return t, nil
+ }
+ } else {
+ t, err := time.ParseInLocation(layout, date, tz)
+ if err == nil {
+ return t, nil
+ }
}
}
return nil, fmt.Errorf("invalid date %s", date)
},
- Types: types(
- new(func(string) time.Time),
- new(func(string, string) time.Time),
- new(func(string, string, string) time.Time),
- ),
+ Validate: func(args []reflect.Type) (reflect.Type, error) {
+ if len(args) < 1 {
+ return anyType, fmt.Errorf("invalid number of arguments (expected at least 1, got %d)", len(args))
+ }
+ if args[0] != nil && args[0].AssignableTo(locationType) {
+ args = args[1:]
+ }
+ if len(args) > 3 {
+ return anyType, fmt.Errorf("invalid number of arguments (expected at most 3, got %d)", len(args))
+ }
+ return timeType, nil
+ },
+ },
+ {
+ Name: "timezone",
+ Func: func(args ...any) (any, error) {
+ tz, err := time.LoadLocation(args[0].(string))
+ if err != nil {
+ return nil, err
+ }
+ return tz, nil
+ },
+ Types: types(time.LoadLocation),
},
{
Name: "first",
diff --git a/vendor/github.com/expr-lang/expr/builtin/lib.go b/vendor/github.com/expr-lang/expr/builtin/lib.go
index e3a6c0aef9..e3cd61b968 100644
--- a/vendor/github.com/expr-lang/expr/builtin/lib.go
+++ b/vendor/github.com/expr-lang/expr/builtin/lib.go
@@ -258,45 +258,6 @@ func String(arg any) any {
return fmt.Sprintf("%v", arg)
}
-func sum(args ...any) (any, error) {
- var total int
- var fTotal float64
-
- for _, arg := range args {
- rv := reflect.ValueOf(deref.Deref(arg))
-
- switch rv.Kind() {
- case reflect.Array, reflect.Slice:
- size := rv.Len()
- for i := 0; i < size; i++ {
- elemSum, err := sum(rv.Index(i).Interface())
- if err != nil {
- return nil, err
- }
- switch elemSum := elemSum.(type) {
- case int:
- total += elemSum
- case float64:
- fTotal += elemSum
- }
- }
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- total += int(rv.Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- total += int(rv.Uint())
- case reflect.Float32, reflect.Float64:
- fTotal += rv.Float()
- default:
- return nil, fmt.Errorf("invalid argument for sum (type %T)", arg)
- }
- }
-
- if fTotal != 0.0 {
- return fTotal + float64(total), nil
- }
- return total, nil
-}
-
func minMax(name string, fn func(any, any) bool, args ...any) (any, error) {
var val any
for _, arg := range args {
diff --git a/vendor/github.com/expr-lang/expr/builtin/utils.go b/vendor/github.com/expr-lang/expr/builtin/utils.go
index 7d3b6ee8e7..29a95731a0 100644
--- a/vendor/github.com/expr-lang/expr/builtin/utils.go
+++ b/vendor/github.com/expr-lang/expr/builtin/utils.go
@@ -3,14 +3,17 @@ package builtin
import (
"fmt"
"reflect"
+ "time"
)
var (
- anyType = reflect.TypeOf(new(any)).Elem()
- integerType = reflect.TypeOf(0)
- floatType = reflect.TypeOf(float64(0))
- arrayType = reflect.TypeOf([]any{})
- mapType = reflect.TypeOf(map[any]any{})
+ anyType = reflect.TypeOf(new(any)).Elem()
+ integerType = reflect.TypeOf(0)
+ floatType = reflect.TypeOf(float64(0))
+ arrayType = reflect.TypeOf([]any{})
+ mapType = reflect.TypeOf(map[any]any{})
+ timeType = reflect.TypeOf(new(time.Time)).Elem()
+ locationType = reflect.TypeOf(new(time.Location))
)
func kind(t reflect.Type) reflect.Kind {
diff --git a/vendor/github.com/expr-lang/expr/checker/checker.go b/vendor/github.com/expr-lang/expr/checker/checker.go
index b46178d43a..c71a98f07e 100644
--- a/vendor/github.com/expr-lang/expr/checker/checker.go
+++ b/vendor/github.com/expr-lang/expr/checker/checker.go
@@ -13,6 +13,45 @@ import (
"github.com/expr-lang/expr/parser"
)
+// ParseCheck parses input expression and checks its types. Also, it applies
+// all provided patchers. In case of error, it returns error with a tree.
+func ParseCheck(input string, config *conf.Config) (*parser.Tree, error) {
+ tree, err := parser.ParseWithConfig(input, config)
+ if err != nil {
+ return tree, err
+ }
+
+ if len(config.Visitors) > 0 {
+ for i := 0; i < 1000; i++ {
+ more := false
+ for _, v := range config.Visitors {
+ // We need to perform types check, because some visitors may rely on
+ // types information available in the tree.
+ _, _ = Check(tree, config)
+
+ ast.Walk(&tree.Node, v)
+
+ if v, ok := v.(interface {
+ ShouldRepeat() bool
+ }); ok {
+ more = more || v.ShouldRepeat()
+ }
+ }
+ if !more {
+ break
+ }
+ }
+ }
+ _, err = Check(tree, config)
+ if err != nil {
+ return tree, err
+ }
+
+ return tree, nil
+}
+
+// Check checks types of the expression tree. It returns type of the expression
+// and error if any. If config is nil, then default configuration will be used.
func Check(tree *parser.Tree, config *conf.Config) (t reflect.Type, err error) {
if config == nil {
config = conf.New(nil)
@@ -653,6 +692,10 @@ func (v *checker) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) {
return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
}
+ if len(node.Arguments) == 1 {
+ return integerType, info{}
+ }
+
v.begin(collection)
closure, _ := v.visit(node.Arguments[1])
v.end()
@@ -668,6 +711,29 @@ func (v *checker) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) {
}
return v.error(node.Arguments[1], "predicate should has one input and one output param")
+ case "sum":
+ collection, _ := v.visit(node.Arguments[0])
+ if !isArray(collection) && !isAny(collection) {
+ return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
+ }
+
+ if len(node.Arguments) == 2 {
+ v.begin(collection)
+ closure, _ := v.visit(node.Arguments[1])
+ v.end()
+
+ if isFunc(closure) &&
+ closure.NumOut() == 1 &&
+ closure.NumIn() == 1 && isAny(closure.In(0)) {
+ return closure.Out(0), info{}
+ }
+ } else {
+ if isAny(collection) {
+ return anyType, info{}
+ }
+ return collection.Elem(), info{}
+ }
+
case "find", "findLast":
collection, _ := v.visit(node.Arguments[0])
if !isArray(collection) && !isAny(collection) {
@@ -978,7 +1044,7 @@ func (v *checker) checkArguments(
continue
}
- if !t.AssignableTo(in) && kind(t) != reflect.Interface {
+ if !(t.AssignableTo(in) || deref.Type(t).AssignableTo(in)) && kind(t) != reflect.Interface {
return anyType, &file.Error{
Location: arg.Location(),
Message: fmt.Sprintf("cannot use %v as argument (type %v) to call %v ", t, in, name),
@@ -1012,9 +1078,11 @@ func traverseAndReplaceIntegerNodesWithIntegerNodes(node *ast.Node, newType refl
case *ast.IntegerNode:
(*node).SetType(newType)
case *ast.UnaryNode:
+ (*node).SetType(newType)
unaryNode := (*node).(*ast.UnaryNode)
traverseAndReplaceIntegerNodesWithIntegerNodes(&unaryNode.Node, newType)
case *ast.BinaryNode:
+ // TODO: Binary node return type is dependent on the type of the operands. We can't just change the type of the node.
binaryNode := (*node).(*ast.BinaryNode)
switch binaryNode.Operator {
case "+", "-", "*":
diff --git a/vendor/github.com/expr-lang/expr/compiler/compiler.go b/vendor/github.com/expr-lang/expr/compiler/compiler.go
index 808b53c9b7..720f6a2652 100644
--- a/vendor/github.com/expr-lang/expr/compiler/compiler.go
+++ b/vendor/github.com/expr-lang/expr/compiler/compiler.go
@@ -2,6 +2,7 @@ package compiler
import (
"fmt"
+ "math"
"reflect"
"regexp"
@@ -92,6 +93,13 @@ type scope struct {
index int
}
+func (c *compiler) nodeParent() ast.Node {
+ if len(c.nodes) > 1 {
+ return c.nodes[len(c.nodes)-2]
+ }
+ return nil
+}
+
func (c *compiler) emitLocation(loc file.Location, op Opcode, arg int) int {
c.bytecode = append(c.bytecode, op)
current := len(c.bytecode)
@@ -322,22 +330,46 @@ func (c *compiler) IntegerNode(node *ast.IntegerNode) {
case reflect.Int:
c.emitPush(node.Value)
case reflect.Int8:
+ if node.Value > math.MaxInt8 || node.Value < math.MinInt8 {
+ panic(fmt.Sprintf("constant %d overflows int8", node.Value))
+ }
c.emitPush(int8(node.Value))
case reflect.Int16:
+ if node.Value > math.MaxInt16 || node.Value < math.MinInt16 {
+ panic(fmt.Sprintf("constant %d overflows int16", node.Value))
+ }
c.emitPush(int16(node.Value))
case reflect.Int32:
+ if node.Value > math.MaxInt32 || node.Value < math.MinInt32 {
+ panic(fmt.Sprintf("constant %d overflows int32", node.Value))
+ }
c.emitPush(int32(node.Value))
case reflect.Int64:
c.emitPush(int64(node.Value))
case reflect.Uint:
+ if node.Value < 0 {
+ panic(fmt.Sprintf("constant %d overflows uint", node.Value))
+ }
c.emitPush(uint(node.Value))
case reflect.Uint8:
+ if node.Value > math.MaxUint8 || node.Value < 0 {
+ panic(fmt.Sprintf("constant %d overflows uint8", node.Value))
+ }
c.emitPush(uint8(node.Value))
case reflect.Uint16:
+ if node.Value > math.MaxUint16 || node.Value < 0 {
+ panic(fmt.Sprintf("constant %d overflows uint16", node.Value))
+ }
c.emitPush(uint16(node.Value))
case reflect.Uint32:
+ if node.Value < 0 {
+ panic(fmt.Sprintf("constant %d overflows uint32", node.Value))
+ }
c.emitPush(uint32(node.Value))
case reflect.Uint64:
+ if node.Value < 0 {
+ panic(fmt.Sprintf("constant %d overflows uint64", node.Value))
+ }
c.emitPush(uint64(node.Value))
default:
c.emitPush(node.Value)
@@ -395,34 +427,12 @@ func (c *compiler) UnaryNode(node *ast.UnaryNode) {
}
func (c *compiler) BinaryNode(node *ast.BinaryNode) {
- l := kind(node.Left)
- r := kind(node.Right)
-
- leftIsSimple := isSimpleType(node.Left)
- rightIsSimple := isSimpleType(node.Right)
- leftAndRightAreSimple := leftIsSimple && rightIsSimple
-
switch node.Operator {
case "==":
- c.compile(node.Left)
- c.derefInNeeded(node.Left)
- c.compile(node.Right)
- c.derefInNeeded(node.Right)
-
- if l == r && l == reflect.Int && leftAndRightAreSimple {
- c.emit(OpEqualInt)
- } else if l == r && l == reflect.String && leftAndRightAreSimple {
- c.emit(OpEqualString)
- } else {
- c.emit(OpEqual)
- }
+ c.equalBinaryNode(node)
case "!=":
- c.compile(node.Left)
- c.derefInNeeded(node.Left)
- c.compile(node.Right)
- c.derefInNeeded(node.Right)
- c.emit(OpEqual)
+ c.equalBinaryNode(node)
c.emit(OpNot)
case "or", "||":
@@ -580,6 +590,28 @@ func (c *compiler) BinaryNode(node *ast.BinaryNode) {
}
}
+func (c *compiler) equalBinaryNode(node *ast.BinaryNode) {
+ l := kind(node.Left.Type())
+ r := kind(node.Right.Type())
+
+ leftIsSimple := isSimpleType(node.Left)
+ rightIsSimple := isSimpleType(node.Right)
+ leftAndRightAreSimple := leftIsSimple && rightIsSimple
+
+ c.compile(node.Left)
+ c.derefInNeeded(node.Left)
+ c.compile(node.Right)
+ c.derefInNeeded(node.Right)
+
+ if l == r && l == reflect.Int && leftAndRightAreSimple {
+ c.emit(OpEqualInt)
+ } else if l == r && l == reflect.String && leftAndRightAreSimple {
+ c.emit(OpEqualString)
+ } else {
+ c.emit(OpEqual)
+ }
+}
+
func isSimpleType(node ast.Node) bool {
if node == nil {
return false
@@ -594,9 +626,21 @@ func isSimpleType(node ast.Node) bool {
func (c *compiler) ChainNode(node *ast.ChainNode) {
c.chains = append(c.chains, []int{})
c.compile(node.Node)
- // Chain activate (got nit somewhere)
for _, ph := range c.chains[len(c.chains)-1] {
- c.patchJump(ph)
+ c.patchJump(ph) // If chain activated jump here (got nit somewhere).
+ }
+ parent := c.nodeParent()
+ if binary, ok := parent.(*ast.BinaryNode); ok && binary.Operator == "??" {
+ // If chain is used in nil coalescing operator, we can omit
+ // nil push at the end of the chain. The ?? operator will
+ // handle it.
+ } else {
+ // We need to put the nil on the stack, otherwise "typed"
+ // nil will be used as a result of the chain.
+ j := c.emit(OpJumpIfNotNil, placeholder)
+ c.emit(OpPop)
+ c.emit(OpNil)
+ c.patchJump(j)
}
c.chains = c.chains[:len(c.chains)-1]
}
@@ -682,9 +726,44 @@ func (c *compiler) SliceNode(node *ast.SliceNode) {
}
func (c *compiler) CallNode(node *ast.CallNode) {
- for _, arg := range node.Arguments {
- c.compile(arg)
+ fn := node.Callee.Type()
+ if kind(fn) == reflect.Func {
+ fnInOffset := 0
+ fnNumIn := fn.NumIn()
+ switch callee := node.Callee.(type) {
+ case *ast.MemberNode:
+ if prop, ok := callee.Property.(*ast.StringNode); ok {
+ if _, ok = callee.Node.Type().MethodByName(prop.Value); ok && callee.Node.Type().Kind() != reflect.Interface {
+ fnInOffset = 1
+ fnNumIn--
+ }
+ }
+ case *ast.IdentifierNode:
+ if t, ok := c.config.Types[callee.Value]; ok && t.Method {
+ fnInOffset = 1
+ fnNumIn--
+ }
+ }
+ for i, arg := range node.Arguments {
+ c.compile(arg)
+ if k := kind(arg.Type()); k == reflect.Ptr || k == reflect.Interface {
+ var in reflect.Type
+ if fn.IsVariadic() && i >= fnNumIn-1 {
+ in = fn.In(fn.NumIn() - 1).Elem()
+ } else {
+ in = fn.In(i + fnInOffset)
+ }
+ if k = kind(in); k != reflect.Ptr && k != reflect.Interface {
+ c.emit(OpDeref)
+ }
+ }
+ }
+ } else {
+ for _, arg := range node.Arguments {
+ c.compile(arg)
+ }
}
+
if ident, ok := node.Callee.(*ast.IdentifierNode); ok {
if c.config != nil {
if fn, ok := c.config.Functions[ident.Value]; ok {
@@ -800,7 +879,11 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) {
c.compile(node.Arguments[0])
c.emit(OpBegin)
c.emitLoop(func() {
- c.compile(node.Arguments[1])
+ if len(node.Arguments) == 2 {
+ c.compile(node.Arguments[1])
+ } else {
+ c.emit(OpPointer)
+ }
c.emitCond(func() {
c.emit(OpIncrementCount)
})
@@ -809,6 +892,25 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) {
c.emit(OpEnd)
return
+ case "sum":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ c.emit(OpInt, 0)
+ c.emit(OpSetAcc)
+ c.emitLoop(func() {
+ if len(node.Arguments) == 2 {
+ c.compile(node.Arguments[1])
+ } else {
+ c.emit(OpPointer)
+ }
+ c.emit(OpGetAcc)
+ c.emit(OpAdd)
+ c.emit(OpSetAcc)
+ })
+ c.emit(OpGetAcc)
+ c.emit(OpEnd)
+ return
+
case "find":
c.compile(node.Arguments[0])
c.emit(OpBegin)
@@ -1094,7 +1196,7 @@ func (c *compiler) PairNode(node *ast.PairNode) {
}
func (c *compiler) derefInNeeded(node ast.Node) {
- switch kind(node) {
+ switch kind(node.Type()) {
case reflect.Ptr, reflect.Interface:
c.emit(OpDeref)
}
@@ -1113,8 +1215,7 @@ func (c *compiler) optimize() {
}
}
-func kind(node ast.Node) reflect.Kind {
- t := node.Type()
+func kind(t reflect.Type) reflect.Kind {
if t == nil {
return reflect.Invalid
}
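
One observable effect of the ChainNode change above: a broken optional chain now leaves an untyped nil on the stack instead of a typed nil, so it composes cleanly with ??. A small sketch under assumed usage (environment and expression are illustrative):

```go
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

type User struct{ Name string }

type Env struct{ User *User }

func main() {
	// User is nil, so User?.Name short-circuits to nil, which the ??
	// operator replaces with the fallback value.
	program, err := expr.Compile(`User?.Name ?? "anonymous"`, expr.Env(Env{}))
	if err != nil {
		panic(err)
	}
	out, err := expr.Run(program, Env{User: nil})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // anonymous
}
```
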
diff --git a/vendor/github.com/expr-lang/expr/expr.go b/vendor/github.com/expr-lang/expr/expr.go
index ba786c0174..8c619e1c4d 100644
--- a/vendor/github.com/expr-lang/expr/expr.go
+++ b/vendor/github.com/expr-lang/expr/expr.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"reflect"
+ "time"
"github.com/expr-lang/expr/ast"
"github.com/expr-lang/expr/builtin"
@@ -12,7 +13,6 @@ import (
"github.com/expr-lang/expr/conf"
"github.com/expr-lang/expr/file"
"github.com/expr-lang/expr/optimizer"
- "github.com/expr-lang/expr/parser"
"github.com/expr-lang/expr/patcher"
"github.com/expr-lang/expr/vm"
)
@@ -183,6 +183,17 @@ func WithContext(name string) Option {
})
}
+// Timezone sets default timezone for date() and now() builtin functions.
+func Timezone(name string) Option {
+ tz, err := time.LoadLocation(name)
+ if err != nil {
+ panic(err)
+ }
+ return Patch(patcher.WithTimezone{
+ Location: tz,
+ })
+}
+
// Compile parses and compiles given input expression to bytecode program.
func Compile(input string, ops ...Option) (*vm.Program, error) {
config := conf.CreateNew()
@@ -194,33 +205,7 @@ func Compile(input string, ops ...Option) (*vm.Program, error) {
}
config.Check()
- tree, err := parser.ParseWithConfig(input, config)
- if err != nil {
- return nil, err
- }
-
- if len(config.Visitors) > 0 {
- for i := 0; i < 1000; i++ {
- more := false
- for _, v := range config.Visitors {
- // We need to perform types check, because some visitors may rely on
- // types information available in the tree.
- _, _ = checker.Check(tree, config)
-
- ast.Walk(&tree.Node, v)
-
- if v, ok := v.(interface {
- ShouldRepeat() bool
- }); ok {
- more = more || v.ShouldRepeat()
- }
- }
- if !more {
- break
- }
- }
- }
- _, err = checker.Check(tree, config)
+ tree, err := checker.ParseCheck(input, config)
if err != nil {
return nil, err
}
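
The new Timezone option wires a *time.Location into the compiled program through the WithTimezone patcher. The sketch below assumes the patcher injects the location into now() and date() calls; it is illustrative, not taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// Timezone panics on an unknown IANA name, so it is normally called
	// with a constant; here it should make now() report time in JST.
	program, err := expr.Compile(`now()`, expr.Timezone("Asia/Tokyo"))
	if err != nil {
		panic(err)
	}
	out, err := expr.Run(program, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```
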
diff --git a/vendor/github.com/expr-lang/expr/file/error.go b/vendor/github.com/expr-lang/expr/file/error.go
index edf202b045..8ff85dfa5f 100644
--- a/vendor/github.com/expr-lang/expr/file/error.go
+++ b/vendor/github.com/expr-lang/expr/file/error.go
@@ -8,22 +8,36 @@ import (
type Error struct {
Location
- Message string
- Snippet string
- Prev error
+ Line int `json:"line"`
+ Column int `json:"column"`
+ Message string `json:"message"`
+ Snippet string `json:"snippet"`
+ Prev error `json:"prev"`
}
func (e *Error) Error() string {
return e.format()
}
-func (e *Error) Bind(source *Source) *Error {
- if snippet, found := source.Snippet(e.Location.Line); found {
+func (e *Error) Bind(source Source) *Error {
+ e.Line = 1
+ for i, r := range source {
+ if i == e.From {
+ break
+ }
+ if r == '\n' {
+ e.Line++
+ e.Column = 0
+ } else {
+ e.Column++
+ }
+ }
+ if snippet, found := source.Snippet(e.Line); found {
snippet := strings.Replace(snippet, "\t", " ", -1)
srcLine := "\n | " + snippet
var bytes = []byte(snippet)
var indLine = "\n | "
- for i := 0; i < e.Location.Column && len(bytes) > 0; i++ {
+ for i := 0; i < e.Column && len(bytes) > 0; i++ {
_, sz := utf8.DecodeRune(bytes)
bytes = bytes[sz:]
if sz > 1 {
@@ -54,7 +68,7 @@ func (e *Error) Wrap(err error) {
}
func (e *Error) format() string {
- if e.Location.Empty() {
+ if e.Snippet == "" {
return e.Message
}
return fmt.Sprintf(
diff --git a/vendor/github.com/expr-lang/expr/file/location.go b/vendor/github.com/expr-lang/expr/file/location.go
index a92e27f0b1..6c6bc2427e 100644
--- a/vendor/github.com/expr-lang/expr/file/location.go
+++ b/vendor/github.com/expr-lang/expr/file/location.go
@@ -1,10 +1,6 @@
package file
type Location struct {
- Line int // The 1-based line of the location.
- Column int // The 0-based column number of the location.
-}
-
-func (l Location) Empty() bool {
- return l.Column == 0 && l.Line == 0
+ From int `json:"from"`
+ To int `json:"to"`
}
diff --git a/vendor/github.com/expr-lang/expr/file/source.go b/vendor/github.com/expr-lang/expr/file/source.go
index d86a546b10..8e2b2d1540 100644
--- a/vendor/github.com/expr-lang/expr/file/source.go
+++ b/vendor/github.com/expr-lang/expr/file/source.go
@@ -1,78 +1,47 @@
package file
import (
- "encoding/json"
"strings"
"unicode/utf8"
)
-type Source struct {
- contents []rune
- lineOffsets []int32
-}
-
-func NewSource(contents string) *Source {
- s := &Source{
- contents: []rune(contents),
- }
- s.updateOffsets()
- return s
-}
-
-func (s *Source) MarshalJSON() ([]byte, error) {
- return json.Marshal(s.contents)
-}
-
-func (s *Source) UnmarshalJSON(b []byte) error {
- contents := make([]rune, 0)
- err := json.Unmarshal(b, &contents)
- if err != nil {
- return err
- }
+type Source []rune
- s.contents = contents
- s.updateOffsets()
- return nil
+func NewSource(contents string) Source {
+ return []rune(contents)
}
-func (s *Source) Content() string {
- return string(s.contents)
+func (s Source) String() string {
+ return string(s)
}
-func (s *Source) Snippet(line int) (string, bool) {
+func (s Source) Snippet(line int) (string, bool) {
if s == nil {
return "", false
}
- charStart, found := s.findLineOffset(line)
- if !found || len(s.contents) == 0 {
+ lines := strings.Split(string(s), "\n")
+ lineOffsets := make([]int, len(lines))
+ var offset int
+ for i, line := range lines {
+ offset = offset + utf8.RuneCountInString(line) + 1
+ lineOffsets[i] = offset
+ }
+ charStart, found := getLineOffset(lineOffsets, line)
+ if !found || len(s) == 0 {
return "", false
}
- charEnd, found := s.findLineOffset(line + 1)
+ charEnd, found := getLineOffset(lineOffsets, line+1)
if found {
- return string(s.contents[charStart : charEnd-1]), true
- }
- return string(s.contents[charStart:]), true
-}
-
-// updateOffsets compute line offsets up front as they are referred to frequently.
-func (s *Source) updateOffsets() {
- lines := strings.Split(string(s.contents), "\n")
- offsets := make([]int32, len(lines))
- var offset int32
- for i, line := range lines {
- offset = offset + int32(utf8.RuneCountInString(line)) + 1
- offsets[int32(i)] = offset
+ return string(s[charStart : charEnd-1]), true
}
- s.lineOffsets = offsets
+ return string(s[charStart:]), true
}
-// findLineOffset returns the offset where the (1-indexed) line begins,
-// or false if line doesn't exist.
-func (s *Source) findLineOffset(line int) (int32, bool) {
+func getLineOffset(lineOffsets []int, line int) (int, bool) {
if line == 1 {
return 0, true
- } else if line > 1 && line <= len(s.lineOffsets) {
- offset := s.lineOffsets[line-2]
+ } else if line > 1 && line <= len(lineOffsets) {
+ offset := lineOffsets[line-2]
return offset, true
}
return -1, false
diff --git a/vendor/github.com/expr-lang/expr/optimizer/optimizer.go b/vendor/github.com/expr-lang/expr/optimizer/optimizer.go
index 6d1fb0b546..4ceb3fa43a 100644
--- a/vendor/github.com/expr-lang/expr/optimizer/optimizer.go
+++ b/vendor/github.com/expr-lang/expr/optimizer/optimizer.go
@@ -37,5 +37,7 @@ func Optimize(node *Node, config *conf.Config) error {
Walk(node, &filterLast{})
Walk(node, &filterFirst{})
Walk(node, &predicateCombination{})
+ Walk(node, &sumArray{})
+ Walk(node, &sumMap{})
return nil
}
diff --git a/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go b/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go
index 2733781df5..6e8a7f7cfc 100644
--- a/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go
+++ b/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go
@@ -5,6 +5,14 @@ import (
"github.com/expr-lang/expr/parser/operator"
)
+/*
+predicateCombination is a visitor that combines multiple predicate calls into a single call.
+For example, the following expression:
+
+ all(x, x > 1) && all(x, x < 10) -> all(x, x > 1 && x < 10)
+ any(x, x > 1) || any(x, x < 10) -> any(x, x > 1 || x < 10)
+ none(x, x > 1) && none(x, x < 10) -> none(x, x > 1 || x < 10)
+*/
type predicateCombination struct{}
func (v *predicateCombination) Visit(node *Node) {
@@ -36,10 +44,12 @@ func (v *predicateCombination) Visit(node *Node) {
}
func combinedOperator(fn, op string) (string, bool) {
- switch fn {
- case "all", "any":
+ switch {
+ case fn == "all" && (op == "and" || op == "&&"):
+ return op, true
+ case fn == "any" && (op == "or" || op == "||"):
return op, true
- case "one", "none":
+ case fn == "none" && (op == "and" || op == "&&"):
switch op {
case "and":
return "or", true
diff --git a/vendor/github.com/expr-lang/expr/optimizer/sum_array.go b/vendor/github.com/expr-lang/expr/optimizer/sum_array.go
new file mode 100644
index 0000000000..0a05d1f2e6
--- /dev/null
+++ b/vendor/github.com/expr-lang/expr/optimizer/sum_array.go
@@ -0,0 +1,37 @@
+package optimizer
+
+import (
+ "fmt"
+
+ . "github.com/expr-lang/expr/ast"
+)
+
+type sumArray struct{}
+
+func (*sumArray) Visit(node *Node) {
+ if sumBuiltin, ok := (*node).(*BuiltinNode); ok &&
+ sumBuiltin.Name == "sum" &&
+ len(sumBuiltin.Arguments) == 1 {
+ if array, ok := sumBuiltin.Arguments[0].(*ArrayNode); ok &&
+ len(array.Nodes) >= 2 {
+ Patch(node, sumArrayFold(array))
+ }
+ }
+}
+
+func sumArrayFold(array *ArrayNode) *BinaryNode {
+ if len(array.Nodes) > 2 {
+ return &BinaryNode{
+ Operator: "+",
+ Left: array.Nodes[0],
+ Right: sumArrayFold(&ArrayNode{Nodes: array.Nodes[1:]}),
+ }
+ } else if len(array.Nodes) == 2 {
+ return &BinaryNode{
+ Operator: "+",
+ Left: array.Nodes[0],
+ Right: array.Nodes[1],
+ }
+ }
+ panic(fmt.Errorf("sumArrayFold: invalid array length %d", len(array.Nodes)))
+}
diff --git a/vendor/github.com/expr-lang/expr/optimizer/sum_map.go b/vendor/github.com/expr-lang/expr/optimizer/sum_map.go
new file mode 100644
index 0000000000..a41a537327
--- /dev/null
+++ b/vendor/github.com/expr-lang/expr/optimizer/sum_map.go
@@ -0,0 +1,25 @@
+package optimizer
+
+import (
+ . "github.com/expr-lang/expr/ast"
+)
+
+type sumMap struct{}
+
+func (*sumMap) Visit(node *Node) {
+ if sumBuiltin, ok := (*node).(*BuiltinNode); ok &&
+ sumBuiltin.Name == "sum" &&
+ len(sumBuiltin.Arguments) == 1 {
+ if mapBuiltin, ok := sumBuiltin.Arguments[0].(*BuiltinNode); ok &&
+ mapBuiltin.Name == "map" &&
+ len(mapBuiltin.Arguments) == 2 {
+ Patch(node, &BuiltinNode{
+ Name: "sum",
+ Arguments: []Node{
+ mapBuiltin.Arguments[0],
+ mapBuiltin.Arguments[1],
+ },
+ })
+ }
+ }
+}
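
The two new optimizer passes fold `sum()` at compile time: `sumArray` rewrites a literal-array argument into chained `+` nodes, and `sumMap` collapses `sum(map(xs, fn))` into the two-argument `sum(xs, fn)` form that the parser change further down also allows. A minimal sketch of how this surfaces through the public expr API (expressions and env are illustrative, and the canonical upstream import path is used, not the vendored mirror path):

```go
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// sum over a literal array: sumArray folds [1, 2, 3] into 1 + 2 + 3 before compilation.
	p1, err := expr.Compile("sum([1, 2, 3])")
	if err != nil {
		panic(err)
	}
	out1, err := expr.Run(p1, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out1) // 6

	// sum over map(...): sumMap rewrites sum(map(xs, # * 2)) into sum(xs, # * 2).
	env := map[string]any{"xs": []int{1, 2, 3}}
	p2, err := expr.Compile("sum(map(xs, # * 2))", expr.Env(env))
	if err != nil {
		panic(err)
	}
	out2, err := expr.Run(p2, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(out2) // 12
}
```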
diff --git a/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go b/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go
index c32658637f..e6b06c09d0 100644
--- a/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go
+++ b/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go
@@ -3,20 +3,18 @@ package lexer
import (
"fmt"
"strings"
- "unicode/utf8"
"github.com/expr-lang/expr/file"
)
-func Lex(source *file.Source) ([]Token, error) {
+func Lex(source file.Source) ([]Token, error) {
l := &lexer{
- input: source.Content(),
+ source: source,
tokens: make([]Token, 0),
+ start: 0,
+ end: 0,
}
-
- l.loc = file.Location{Line: 1, Column: 0}
- l.prev = l.loc
- l.startLoc = l.loc
+ l.commit()
for state := root; state != nil; {
state = state(l)
@@ -30,34 +28,25 @@ func Lex(source *file.Source) ([]Token, error) {
}
type lexer struct {
- input string
+ source file.Source
tokens []Token
- start, end int // current position in input
- width int // last rune width
- startLoc file.Location // start location
- prev, loc file.Location // prev location of end location, end location
+ start, end int
err *file.Error
}
const eof rune = -1
+func (l *lexer) commit() {
+ l.start = l.end
+}
+
func (l *lexer) next() rune {
- if l.end >= len(l.input) {
- l.width = 0
+ if l.end >= len(l.source) {
+ l.end++
return eof
}
- r, w := utf8.DecodeRuneInString(l.input[l.end:])
- l.width = w
- l.end += w
-
- l.prev = l.loc
- if r == '\n' {
- l.loc.Line++
- l.loc.Column = 0
- } else {
- l.loc.Column++
- }
-
+ r := l.source[l.end]
+ l.end++
return r
}
@@ -68,8 +57,7 @@ func (l *lexer) peek() rune {
}
func (l *lexer) backup() {
- l.end -= l.width
- l.loc = l.prev
+ l.end--
}
func (l *lexer) emit(t Kind) {
@@ -78,35 +66,39 @@ func (l *lexer) emit(t Kind) {
func (l *lexer) emitValue(t Kind, value string) {
l.tokens = append(l.tokens, Token{
- Location: l.startLoc,
+ Location: file.Location{From: l.start, To: l.end},
Kind: t,
Value: value,
})
- l.start = l.end
- l.startLoc = l.loc
+ l.commit()
}
func (l *lexer) emitEOF() {
+ from := l.end - 2
+ if from < 0 {
+ from = 0
+ }
+ to := l.end - 1
+ if to < 0 {
+ to = 0
+ }
l.tokens = append(l.tokens, Token{
- Location: l.prev, // Point to previous position for better error messages.
+ Location: file.Location{From: from, To: to},
Kind: EOF,
})
- l.start = l.end
- l.startLoc = l.loc
+ l.commit()
}
func (l *lexer) skip() {
- l.start = l.end
- l.startLoc = l.loc
+ l.commit()
}
func (l *lexer) word() string {
- return l.input[l.start:l.end]
-}
-
-func (l *lexer) ignore() {
- l.start = l.end
- l.startLoc = l.loc
+ // TODO: boundary check is NOT needed here, but for some reason CI fuzz tests are failing.
+ if l.start > len(l.source) || l.end > len(l.source) {
+ return "__invalid__"
+ }
+ return string(l.source[l.start:l.end])
}
func (l *lexer) accept(valid string) bool {
@@ -132,18 +124,18 @@ func (l *lexer) skipSpaces() {
}
func (l *lexer) acceptWord(word string) bool {
- pos, loc, prev := l.end, l.loc, l.prev
+ pos := l.end
l.skipSpaces()
for _, ch := range word {
if l.next() != ch {
- l.end, l.loc, l.prev = pos, loc, prev
+ l.end = pos
return false
}
}
if r := l.peek(); r != ' ' && r != eof {
- l.end, l.loc, l.prev = pos, loc, prev
+ l.end = pos
return false
}
@@ -153,8 +145,11 @@ func (l *lexer) acceptWord(word string) bool {
func (l *lexer) error(format string, args ...any) stateFn {
if l.err == nil { // show first error
l.err = &file.Error{
- Location: l.loc,
- Message: fmt.Sprintf(format, args...),
+ Location: file.Location{
+ From: l.end - 1,
+ To: l.end,
+ },
+ Message: fmt.Sprintf(format, args...),
}
}
return nil
@@ -230,6 +225,6 @@ func (l *lexer) scanRawString(quote rune) (n int) {
ch = l.next()
n++
}
- l.emitValue(String, l.input[l.start+1:l.end-1])
+ l.emitValue(String, string(l.source[l.start+1:l.end-1]))
return
}
diff --git a/vendor/github.com/expr-lang/expr/parser/lexer/state.go b/vendor/github.com/expr-lang/expr/parser/lexer/state.go
index 72f02bf4ef..d351e2f5c8 100644
--- a/vendor/github.com/expr-lang/expr/parser/lexer/state.go
+++ b/vendor/github.com/expr-lang/expr/parser/lexer/state.go
@@ -14,7 +14,7 @@ func root(l *lexer) stateFn {
l.emitEOF()
return nil
case utils.IsSpace(r):
- l.ignore()
+ l.skip()
return root
case r == '\'' || r == '"':
l.scanString(r)
@@ -83,14 +83,14 @@ func (l *lexer) scanNumber() bool {
}
}
l.acceptRun(digits)
- loc, prev, end := l.loc, l.prev, l.end
+ end := l.end
if l.accept(".") {
// Lookup for .. operator: if after dot there is another dot (1..2), it maybe a range operator.
if l.peek() == '.' {
// We can't backup() here, as it would require two backups,
// and backup() func supports only one for now. So, save and
// restore it here.
- l.loc, l.prev, l.end = loc, prev, end
+ l.end = end
return true
}
l.acceptRun(digits)
@@ -147,7 +147,7 @@ func not(l *lexer) stateFn {
l.skipSpaces()
- pos, loc, prev := l.end, l.loc, l.prev
+ end := l.end
// Get the next word.
for {
@@ -164,7 +164,7 @@ func not(l *lexer) stateFn {
case "in", "matches", "contains", "startsWith", "endsWith":
l.emit(Operator)
default:
- l.end, l.loc, l.prev = pos, loc, prev
+ l.end = end
}
return root
}
@@ -193,7 +193,7 @@ func singleLineComment(l *lexer) stateFn {
break
}
}
- l.ignore()
+ l.skip()
return root
}
@@ -207,7 +207,7 @@ func multiLineComment(l *lexer) stateFn {
break
}
}
- l.ignore()
+ l.skip()
return root
}
diff --git a/vendor/github.com/expr-lang/expr/parser/parser.go b/vendor/github.com/expr-lang/expr/parser/parser.go
index 9cb79cbbb4..77b2a700a3 100644
--- a/vendor/github.com/expr-lang/expr/parser/parser.go
+++ b/vendor/github.com/expr-lang/expr/parser/parser.go
@@ -33,7 +33,8 @@ var predicates = map[string]struct {
"one": {[]arg{expr, closure}},
"filter": {[]arg{expr, closure}},
"map": {[]arg{expr, closure}},
- "count": {[]arg{expr, closure}},
+ "count": {[]arg{expr, closure | optional}},
+ "sum": {[]arg{expr, closure | optional}},
"find": {[]arg{expr, closure}},
"findIndex": {[]arg{expr, closure}},
"findLast": {[]arg{expr, closure}},
@@ -54,7 +55,7 @@ type parser struct {
type Tree struct {
Node Node
- Source *file.Source
+ Source file.Source
}
func Parse(input string) (*Tree, error) {
@@ -83,14 +84,16 @@ func ParseWithConfig(input string, config *conf.Config) (*Tree, error) {
p.error("unexpected token %v", p.current)
}
+ tree := &Tree{
+ Node: node,
+ Source: source,
+ }
+
if p.err != nil {
- return nil, p.err.Bind(source)
+ return tree, p.err.Bind(source)
}
- return &Tree{
- Node: node,
- Source: source,
- }, nil
+ return tree, nil
}
func (p *parser) error(format string, args ...any) {
diff --git a/vendor/github.com/expr-lang/expr/patcher/with_context.go b/vendor/github.com/expr-lang/expr/patcher/with_context.go
index 55b6042614..f9861a2c2f 100644
--- a/vendor/github.com/expr-lang/expr/patcher/with_context.go
+++ b/vendor/github.com/expr-lang/expr/patcher/with_context.go
@@ -22,11 +22,18 @@ func (w WithContext) Visit(node *ast.Node) {
if fn.Kind() != reflect.Func {
return
}
- if fn.NumIn() == 0 {
- return
- }
- if fn.In(0).String() != "context.Context" {
+ switch fn.NumIn() {
+ case 0:
return
+ case 1:
+ if fn.In(0).String() != "context.Context" {
+ return
+ }
+ default:
+ if fn.In(0).String() != "context.Context" &&
+ fn.In(1).String() != "context.Context" {
+ return
+ }
}
ast.Patch(node, &ast.CallNode{
Callee: call.Callee,
diff --git a/vendor/github.com/expr-lang/expr/patcher/with_timezone.go b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go
new file mode 100644
index 0000000000..83eb28e95a
--- /dev/null
+++ b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go
@@ -0,0 +1,25 @@
+package patcher
+
+import (
+ "time"
+
+ "github.com/expr-lang/expr/ast"
+)
+
+// WithTimezone passes Location to date() and now() functions.
+type WithTimezone struct {
+ Location *time.Location
+}
+
+func (t WithTimezone) Visit(node *ast.Node) {
+ if btin, ok := (*node).(*ast.BuiltinNode); ok {
+ switch btin.Name {
+ case "date", "now":
+ loc := &ast.ConstantNode{Value: t.Location}
+ ast.Patch(node, &ast.BuiltinNode{
+ Name: btin.Name,
+ Arguments: append([]ast.Node{loc}, btin.Arguments...),
+ })
+ }
+ }
+}
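
The WithTimezone patcher simply prepends a *time.Location constant to every date()/now() builtin call. Because it satisfies ast.Visitor, it can be attached via the generic expr.Patch option; a rough sketch under the assumption that the bundled now()/date() builtins in this expr version accept a leading *time.Location argument, which the patcher presupposes:

```go
package main

import (
	"fmt"
	"time"

	"github.com/expr-lang/expr"
	"github.com/expr-lang/expr/patcher"
)

func main() {
	loc, err := time.LoadLocation("Europe/Berlin")
	if err != nil {
		panic(err)
	}

	// Patch the AST so now() is evaluated in the configured location.
	program, err := expr.Compile("now()", expr.Patch(patcher.WithTimezone{Location: loc}))
	if err != nil {
		panic(err)
	}

	out, err := expr.Run(program, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.(time.Time).Location()) // Europe/Berlin
}
```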
diff --git a/vendor/github.com/expr-lang/expr/vm/program.go b/vendor/github.com/expr-lang/expr/vm/program.go
index 9895467441..15ce26f5b2 100644
--- a/vendor/github.com/expr-lang/expr/vm/program.go
+++ b/vendor/github.com/expr-lang/expr/vm/program.go
@@ -21,7 +21,7 @@ type Program struct {
Arguments []int
Constants []any
- source *file.Source
+ source file.Source
node ast.Node
locations []file.Location
variables int
@@ -32,7 +32,7 @@ type Program struct {
// NewProgram returns a new Program. It's used by the compiler.
func NewProgram(
- source *file.Source,
+ source file.Source,
node ast.Node,
locations []file.Location,
variables int,
@@ -58,7 +58,7 @@ func NewProgram(
}
// Source returns origin file.Source.
-func (program *Program) Source() *file.Source {
+func (program *Program) Source() file.Source {
return program.source
}
diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go
index 3529fdd586..d950f11114 100644
--- a/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go
+++ b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go
@@ -334,6 +334,344 @@ func Equal(a, b interface{}) bool {
case float64:
return float64(x) == float64(y)
}
+ case []any:
+ switch y := b.(type) {
+ case []string:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []uint:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []uint8:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []uint16:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []uint32:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []uint64:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []int:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []int8:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []int16:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []int32:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []int64:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []float32:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []float64:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ case []any:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if !Equal(x[i], y[i]) {
+ return false
+ }
+ }
+ return true
+ }
+ case []string:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []string:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []uint:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []uint:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []uint8:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []uint8:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []uint16:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []uint16:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []uint32:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []uint32:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []uint64:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []uint64:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []int:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []int:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []int8:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []int8:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []int16:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []int16:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []int32:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []int32:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []int64:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []int64:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []float32:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []float32:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
+ case []float64:
+ switch y := b.(type) {
+ case []any:
+ return Equal(y, x)
+ case []float64:
+ if len(x) != len(y) {
+ return false
+ }
+ for i := range x {
+ if x[i] != y[i] {
+ return false
+ }
+ }
+ return true
+ }
case string:
switch y := b.(type) {
case string:
diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go
index 7da1320de3..cd48a280dc 100644
--- a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go
+++ b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go
@@ -35,8 +35,12 @@ func Fetch(from, i any) any {
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.String:
index := ToInt(i)
+ l := v.Len()
if index < 0 {
- index = v.Len() + index
+ index = l + index
+ }
+ if index < 0 || index >= l {
+ panic(fmt.Sprintf("index out of range: %v (array length is %v)", index, l))
}
value := v.Index(index)
if value.IsValid() {
diff --git a/vendor/github.com/expr-lang/expr/vm/vm.go b/vendor/github.com/expr-lang/expr/vm/vm.go
index 7e933ce740..fa1223b420 100644
--- a/vendor/github.com/expr-lang/expr/vm/vm.go
+++ b/vendor/github.com/expr-lang/expr/vm/vm.go
@@ -274,31 +274,50 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) {
case OpMatches:
b := vm.pop()
a := vm.pop()
+ if runtime.IsNil(a) || runtime.IsNil(b) {
+ vm.push(false)
+ break
+ }
match, err := regexp.MatchString(b.(string), a.(string))
if err != nil {
panic(err)
}
-
vm.push(match)
case OpMatchesConst:
a := vm.pop()
+ if runtime.IsNil(a) {
+ vm.push(false)
+ break
+ }
r := program.Constants[arg].(*regexp.Regexp)
vm.push(r.MatchString(a.(string)))
case OpContains:
b := vm.pop()
a := vm.pop()
+ if runtime.IsNil(a) || runtime.IsNil(b) {
+ vm.push(false)
+ break
+ }
vm.push(strings.Contains(a.(string), b.(string)))
case OpStartsWith:
b := vm.pop()
a := vm.pop()
+ if runtime.IsNil(a) || runtime.IsNil(b) {
+ vm.push(false)
+ break
+ }
vm.push(strings.HasPrefix(a.(string), b.(string)))
case OpEndsWith:
b := vm.pop()
a := vm.pop()
+ if runtime.IsNil(a) || runtime.IsNil(b) {
+ vm.push(false)
+ break
+ }
vm.push(strings.HasSuffix(a.(string), b.(string)))
case OpSlice:
diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go
index 889f9e77bd..c4234287dc 100644
--- a/vendor/github.com/fatih/color/color.go
+++ b/vendor/github.com/fatih/color/color.go
@@ -65,6 +65,29 @@ const (
CrossedOut
)
+const (
+ ResetBold Attribute = iota + 22
+ ResetItalic
+ ResetUnderline
+ ResetBlinking
+ _
+ ResetReversed
+ ResetConcealed
+ ResetCrossedOut
+)
+
+var mapResetAttributes map[Attribute]Attribute = map[Attribute]Attribute{
+ Bold: ResetBold,
+ Faint: ResetBold,
+ Italic: ResetItalic,
+ Underline: ResetUnderline,
+ BlinkSlow: ResetBlinking,
+ BlinkRapid: ResetBlinking,
+ ReverseVideo: ResetReversed,
+ Concealed: ResetConcealed,
+ CrossedOut: ResetCrossedOut,
+}
+
// Foreground text colors
const (
FgBlack Attribute = iota + 30
@@ -246,10 +269,7 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
- c.SetWriter(w)
- defer c.UnsetWriter(w)
-
- return fmt.Fprintln(w, a...)
+ return fmt.Fprintln(w, c.wrap(fmt.Sprint(a...)))
}
// Println formats using the default formats for its operands and writes to
@@ -258,10 +278,7 @@ func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
- c.Set()
- defer c.unset()
-
- return fmt.Fprintln(Output, a...)
+ return fmt.Fprintln(Output, c.wrap(fmt.Sprint(a...)))
}
// Sprint is just like Print, but returns a string instead of printing it.
@@ -271,7 +288,7 @@ func (c *Color) Sprint(a ...interface{}) string {
// Sprintln is just like Println, but returns a string instead of printing it.
func (c *Color) Sprintln(a ...interface{}) string {
- return c.wrap(fmt.Sprintln(a...))
+ return fmt.Sprintln(c.Sprint(a...))
}
// Sprintf is just like Printf, but returns a string instead of printing it.
@@ -353,7 +370,7 @@ func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
// string. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
- return c.wrap(fmt.Sprintln(a...))
+ return fmt.Sprintln(c.Sprint(a...))
}
}
@@ -383,7 +400,18 @@ func (c *Color) format() string {
}
func (c *Color) unformat() string {
- return fmt.Sprintf("%s[%dm", escape, Reset)
+ //return fmt.Sprintf("%s[%dm", escape, Reset)
+ // for each element in the sequence, use the specific reset escape, or the generic reset if no mapping is found
+ format := make([]string, len(c.params))
+ for i, v := range c.params {
+ format[i] = strconv.Itoa(int(Reset))
+ ra, ok := mapResetAttributes[v]
+ if ok {
+ format[i] = strconv.Itoa(int(ra))
+ }
+ }
+
+ return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";"))
}
// DisableColor disables the color output. Useful to not change any existing
@@ -411,6 +439,12 @@ func (c *Color) isNoColorSet() bool {
// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
+ if c == nil && c2 == nil {
+ return true
+ }
+ if c == nil || c2 == nil {
+ return false
+ }
if len(c.params) != len(c2.params) {
return false
}
diff --git a/vendor/github.com/go-resty/resty/v2/README.md b/vendor/github.com/go-resty/resty/v2/README.md
index d6d501ef89..2f9bac6ee4 100644
--- a/vendor/github.com/go-resty/resty/v2/README.md
+++ b/vendor/github.com/go-resty/resty/v2/README.md
@@ -4,7 +4,7 @@
Features section describes in detail about Resty capabilities
-
+
Resty Communication Channels
@@ -13,7 +13,7 @@
## News
- * v2.11.0 [released](https://github.com/go-resty/resty/releases/tag/v2.11.0) and tagged on Dec 27, 2023.
+ * v2.12.0 [released](https://github.com/go-resty/resty/releases/tag/v2.12.0) and tagged on Mar 17, 2024.
* v2.0.0 [released](https://github.com/go-resty/resty/releases/tag/v2.0.0) and tagged on Jul 16, 2019.
* v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019.
* v1.0 released and tagged on Sep 25, 2017. - Resty's first version was released on Sep 15, 2015 then it grew gradually as a very handy and helpful library. Its been a two years since first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors).
@@ -702,8 +702,8 @@ client.
})
```
-By default, resty will retry requests that return a non-nil error during execution.
-Therefore, the above setup will result in resty retrying requests with non-nil errors up to 3 times,
+By default, resty will retry requests that return a non-nil error during execution.
+Therefore, the above setup will result in resty retrying requests with non-nil errors up to 3 times,
with the delay increasing after each attempt.
You can optionally provide client with [custom retry conditions](https://pkg.go.dev/github.com/go-resty/resty/v2#RetryConditionFunc):
@@ -739,7 +739,7 @@ client.AddRetryCondition(
)
```
-Multiple retry conditions can be added.
+Multiple retry conditions can be added.
Note that if multiple conditions are specified, a retry will occur if any of the conditions are met.
It is also possible to use `resty.Backoff(...)` to get arbitrary retry scenarios
@@ -797,7 +797,7 @@ client.SetTimeout(1 * time.Minute)
// You can override all below settings and options at request level if you want to
//--------------------------------------------------------------------------------
// Host URL for all request. So you can use relative URL in the request
-client.SetHostURL("http://httpbin.org")
+client.SetBaseURL("http://httpbin.org")
// Headers for all request
client.SetHeader("Accept", "application/json")
@@ -861,7 +861,7 @@ client := resty.New()
// Set the previous transport that we created, set the scheme of the communication to the
// socket and set the unixSocket as the HostURL.
-client.SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket)
+client.SetTransport(&transport).SetScheme("http").SetBaseURL(unixSocket)
// No need to write the host's URL on the request, just the path.
client.R().Get("http://localhost/index.html")
diff --git a/vendor/github.com/go-resty/resty/v2/client.go b/vendor/github.com/go-resty/resty/v2/client.go
index 446ba85175..1bcafba813 100644
--- a/vendor/github.com/go-resty/resty/v2/client.go
+++ b/vendor/github.com/go-resty/resty/v2/client.go
@@ -142,11 +142,11 @@ type Client struct {
proxyURL *url.URL
beforeRequest []RequestMiddleware
udBeforeRequest []RequestMiddleware
- udBeforeRequestLock sync.RWMutex
+ udBeforeRequestLock *sync.RWMutex
preReqHook PreRequestHook
successHooks []SuccessHook
afterResponse []ResponseMiddleware
- afterResponseLock sync.RWMutex
+ afterResponseLock *sync.RWMutex
requestLog RequestLogCallback
responseLog ResponseLogCallback
errorHooks []ErrorHook
@@ -1125,6 +1125,25 @@ func (c *Client) GetClient() *http.Client {
return c.httpClient
}
+// Clone returns a clone of the original client.
+//
+// Be careful when using this function:
+// - Interface values are not deeply cloned. Thus, both the original and the clone will use the
+// same value.
+// - This function is not safe for concurrent use. You should only use this when you are sure that
+// the client is not being used by any other goroutine.
+//
+// Since v2.12.0
+func (c *Client) Clone() *Client {
+ // dereference the pointer and copy the value
+ cc := *c
+
+ // lock values should not be copied - thus new values are used.
+ cc.afterResponseLock = &sync.RWMutex{}
+ cc.udBeforeRequestLock = &sync.RWMutex{}
+ return &cc
+}
+
//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
// Client Unexported methods
//_______________________________________________________________________
@@ -1360,9 +1379,11 @@ func createClient(hc *http.Client) *Client {
XMLUnmarshal: xml.Unmarshal,
HeaderAuthorizationKey: http.CanonicalHeaderKey("Authorization"),
- jsonEscapeHTML: true,
- httpClient: hc,
- debugBodySizeLimit: math.MaxInt32,
+ jsonEscapeHTML: true,
+ httpClient: hc,
+ debugBodySizeLimit: math.MaxInt32,
+ udBeforeRequestLock: &sync.RWMutex{},
+ afterResponseLock: &sync.RWMutex{},
}
// Logger
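
Clone() above is a shallow copy: only the two mutex pointers are replaced, so middleware slices, transport, and other interface-valued settings stay shared between the original and the clone, and the call is not safe once the client is already in use by other goroutines. A short usage sketch under that caveat (URLs are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-resty/resty/v2"
)

func main() {
	base := resty.New().
		SetBaseURL("https://api.example.com").
		SetHeader("Accept", "application/json")

	// Clone before the client is shared across goroutines; the copy keeps the
	// same middleware slices and transport (shallow copy), only the locks are new.
	reporting := base.Clone().SetBaseURL("https://reporting.example.com")

	fmt.Println(base.BaseURL, reporting.BaseURL)
	// https://api.example.com https://reporting.example.com
}
```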
diff --git a/vendor/github.com/go-resty/resty/v2/middleware.go b/vendor/github.com/go-resty/resty/v2/middleware.go
index ac2bbc9e88..603448dfba 100644
--- a/vendor/github.com/go-resty/resty/v2/middleware.go
+++ b/vendor/github.com/go-resty/resty/v2/middleware.go
@@ -57,8 +57,8 @@ func parseRequestURL(c *Client, r *Request) error {
buf := acquireBuffer()
defer releaseBuffer(buf)
// search for the next or first opened curly bracket
- for curr := strings.Index(r.URL, "{"); curr > prev; curr = prev + strings.Index(r.URL[prev:], "{") {
- // write everything form the previous position up to the current
+ for curr := strings.Index(r.URL, "{"); curr == 0 || curr > prev; curr = prev + strings.Index(r.URL[prev:], "{") {
+ // write everything from the previous position up to the current
if curr > prev {
buf.WriteString(r.URL[prev:curr])
}
diff --git a/vendor/github.com/go-resty/resty/v2/request.go b/vendor/github.com/go-resty/resty/v2/request.go
index fec0976382..4e13ff094a 100644
--- a/vendor/github.com/go-resty/resty/v2/request.go
+++ b/vendor/github.com/go-resty/resty/v2/request.go
@@ -1014,7 +1014,12 @@ func (r *Request) fmtBodyString(sl int64) (body string) {
contentType := r.Header.Get(hdrContentTypeKey)
kind := kindOf(r.Body)
if canJSONMarshal(contentType, kind) {
- prtBodyBytes, err = noescapeJSONMarshalIndent(&r.Body)
+ var bodyBuf *bytes.Buffer
+ bodyBuf, err = noescapeJSONMarshalIndent(&r.Body)
+ if err == nil {
+ prtBodyBytes = bodyBuf.Bytes()
+ defer releaseBuffer(bodyBuf)
+ }
} else if IsXMLType(contentType) && (kind == reflect.Struct) {
prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ")
} else if b, ok := r.Body.(string); ok {
@@ -1077,17 +1082,16 @@ var noescapeJSONMarshal = func(v interface{}) (*bytes.Buffer, error) {
return buf, nil
}
-var noescapeJSONMarshalIndent = func(v interface{}) ([]byte, error) {
+var noescapeJSONMarshalIndent = func(v interface{}) (*bytes.Buffer, error) {
buf := acquireBuffer()
- defer releaseBuffer(buf)
-
encoder := json.NewEncoder(buf)
encoder.SetEscapeHTML(false)
encoder.SetIndent("", " ")
if err := encoder.Encode(v); err != nil {
+ releaseBuffer(buf)
return nil, err
}
- return buf.Bytes(), nil
+ return buf, nil
}
diff --git a/vendor/github.com/go-resty/resty/v2/resty.go b/vendor/github.com/go-resty/resty/v2/resty.go
index 21dcd5655b..985cff2502 100644
--- a/vendor/github.com/go-resty/resty/v2/resty.go
+++ b/vendor/github.com/go-resty/resty/v2/resty.go
@@ -14,7 +14,7 @@ import (
)
// Version # of resty
-const Version = "2.10.0"
+const Version = "2.12.0"
// New method creates a new Resty client.
func New() *Client {
diff --git a/vendor/github.com/go-resty/resty/v2/util.go b/vendor/github.com/go-resty/resty/v2/util.go
index 27b466dc18..5a69e4fcf3 100644
--- a/vendor/github.com/go-resty/resty/v2/util.go
+++ b/vendor/github.com/go-resty/resty/v2/util.go
@@ -216,7 +216,7 @@ func writeMultipartFormFile(w *multipart.Writer, fieldName, fileName string, r i
return err
}
- partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf)))
+ partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf[:size])))
if err != nil {
return err
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
index 31553e7848..5dd4e44786 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -148,6 +148,12 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
var pairs []string
for key, vals := range req.Header {
key = textproto.CanonicalMIMEHeaderKey(key)
+ switch key {
+ case xForwardedFor, xForwardedHost:
+ // Handled separately below
+ continue
+ }
+
for _, val := range vals {
// For backwards-compatibility, pass through 'authorization' header with no prefix.
if key == "Authorization" {
@@ -181,18 +187,17 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
}
+ xff := req.Header.Values(xForwardedFor)
if addr := req.RemoteAddr; addr != "" {
if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
- if fwd := req.Header.Get(xForwardedFor); fwd == "" {
- pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
- } else {
- pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
- }
+ xff = append(xff, remoteIP)
}
}
+ if len(xff) > 0 {
+ pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", "))
+ }
if timeout != 0 {
- //nolint:govet // The context outlives this function
ctx, _ = context.WithTimeout(ctx, timeout)
}
if len(pairs) == 0 {
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
index 230cac7b86..5682998699 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -71,7 +71,7 @@ func HTTPStatusFromCode(code codes.Code) int {
case codes.DataLoss:
return http.StatusInternalServerError
default:
- grpclog.Infof("Unknown gRPC error code: %v", code)
+ grpclog.Warningf("Unknown gRPC error code: %v", code)
return http.StatusInternalServerError
}
}
@@ -114,17 +114,17 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
buf, merr := marshaler.Marshal(pb)
if merr != nil {
- grpclog.Infof("Failed to marshal error message %q: %v", s, merr)
+ grpclog.Errorf("Failed to marshal error message %q: %v", s, merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
+ grpclog.Errorf("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
+ grpclog.Error("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
@@ -148,7 +148,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
+ grpclog.Errorf("Failed to write response: %v", err)
}
if doForwardTrailers {
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
index 19d9d37fff..9005d6a0bf 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -41,7 +41,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
m, ok := item.node.(map[string]interface{})
switch {
- case ok:
+ case ok && len(m) > 0:
// if the item is an object, then enqueue all of its children
for k, v := range m {
if item.msg == nil {
@@ -96,6 +96,8 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
queue = append(queue, child)
}
}
+ case ok && len(m) == 0:
+ fallthrough
case len(item.path) > 0:
// otherwise, it's a leaf node so print its path
fm.Paths = append(fm.Paths, item.path)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
index 5e14cf8b0e..de1eef1f4f 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -6,6 +6,7 @@ import (
"io"
"net/http"
"net/textproto"
+ "strconv"
"strings"
"google.golang.org/genproto/googleapis/api/httpbody"
@@ -17,16 +18,10 @@ import (
// ForwardResponseStream forwards the stream from gRPC server to REST client.
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
- f, ok := w.(http.Flusher)
- if !ok {
- grpclog.Infof("Flush not supported in %T", w)
- http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
- return
- }
-
+ rc := http.NewResponseController(w)
md, ok := ServerMetadataFromContext(ctx)
if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
+ grpclog.Error("Failed to extract ServerMetadata from context")
http.Error(w, "unexpected error", http.StatusInternalServerError)
return
}
@@ -81,20 +76,29 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
}
if err != nil {
- grpclog.Infof("Failed to marshal response chunk: %v", err)
+ grpclog.Errorf("Failed to marshal response chunk: %v", err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
if _, err := w.Write(buf); err != nil {
- grpclog.Infof("Failed to send response chunk: %v", err)
+ grpclog.Errorf("Failed to send response chunk: %v", err)
return
}
wroteHeader = true
if _, err := w.Write(delimiter); err != nil {
- grpclog.Infof("Failed to send delimiter chunk: %v", err)
+ grpclog.Errorf("Failed to send delimiter chunk: %v", err)
+ return
+ }
+ err = rc.Flush()
+ if err != nil {
+ if errors.Is(err, http.ErrNotSupported) {
+ grpclog.Errorf("Flush not supported in %T", w)
+ http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+ return
+ }
+ grpclog.Errorf("Failed to flush response to client: %v", err)
return
}
- f.Flush()
}
}
@@ -136,7 +140,7 @@ type responseBody interface {
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
md, ok := ServerMetadataFromContext(ctx)
if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
+ grpclog.Error("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
@@ -168,13 +172,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
buf, err = marshaler.Marshal(resp)
}
if err != nil {
- grpclog.Infof("Marshal error: %v", err)
+ grpclog.Errorf("Marshal error: %v", err)
HTTPError(ctx, mux, marshaler, w, req, err)
return
}
+ if !doForwardTrailers {
+ w.Header().Set("Content-Length", strconv.Itoa(len(buf)))
+ }
+
if _, err = w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
+ grpclog.Errorf("Failed to write response: %v", err)
}
if doForwardTrailers {
@@ -193,7 +201,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re
}
for _, opt := range opts {
if err := opt(ctx, w, resp); err != nil {
- grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
+ grpclog.Errorf("Error handling ForwardResponseOptions: %v", err)
return err
}
}
@@ -209,15 +217,15 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar
}
buf, err := marshaler.Marshal(msg)
if err != nil {
- grpclog.Infof("Failed to marshal an error: %v", err)
+ grpclog.Errorf("Failed to marshal an error: %v", err)
return
}
if _, err := w.Write(buf); err != nil {
- grpclog.Infof("Failed to notify error to client: %v", err)
+ grpclog.Errorf("Failed to notify error to client: %v", err)
return
}
if _, err := w.Write(delimiter); err != nil {
- grpclog.Infof("Failed to send delimiter chunk: %v", err)
+ grpclog.Errorf("Failed to send delimiter chunk: %v", err)
return
}
}
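
The streaming forwarder now relies on http.NewResponseController instead of asserting http.Flusher up front, so wrapped ResponseWriters that only expose Unwrap() can still flush, and an unsupported writer is reported only when Flush actually fails. A small standalone sketch of the same stdlib pattern (the handler itself is illustrative, not taken from this diff):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

func stream(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "chunk %d\n", i)
		if err := rc.Flush(); err != nil {
			if errors.Is(err, http.ErrNotSupported) {
				http.Error(w, "streaming unsupported", http.StatusInternalServerError)
				return
			}
			return // write failed or client went away
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	http.HandleFunc("/stream", stream)
	_ = http.ListenAndServe(":8080", nil)
}
```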
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
index d6aa825783..fe52081ab9 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
@@ -24,6 +24,11 @@ func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
+// MarshalIndent is like Marshal but applies Indent to format the output
+func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ return json.MarshalIndent(v, prefix, indent)
+}
+
// Unmarshal unmarshals JSON data into "v".
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
return json.Unmarshal(data, v)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
index 51b8247da2..8376d1e0ef 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
@@ -30,10 +30,6 @@ func (*JSONPb) ContentType(_ interface{}) string {
// Marshal marshals "v" into JSON.
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
- if _, ok := v.(proto.Message); !ok {
- return j.marshalNonProtoField(v)
- }
-
var buf bytes.Buffer
if err := j.marshalTo(&buf, v); err != nil {
return nil, err
@@ -48,9 +44,17 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
if err != nil {
return err
}
+ if j.Indent != "" {
+ b := &bytes.Buffer{}
+ if err := json.Indent(b, buf, "", j.Indent); err != nil {
+ return err
+ }
+ buf = b.Bytes()
+ }
_, err = w.Write(buf)
return err
}
+
b, err := j.MarshalOptions.Marshal(p)
if err != nil {
return err
@@ -150,9 +154,6 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
}
m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
}
- if j.Indent != "" {
- return json.MarshalIndent(m, "", j.Indent)
- }
return json.Marshal(m)
}
if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
index a714de0240..0b051e6e89 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -46,7 +46,7 @@ func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, out
for _, contentTypeVal := range r.Header[contentTypeHeader] {
contentType, _, err := mime.ParseMediaType(contentTypeVal)
if err != nil {
- grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+ grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err)
continue
}
if m, ok := mux.marshalers.mimeMap[contentType]; ok {
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
index 628e1fde1c..ed9a7e4387 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -341,13 +341,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
- r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
return
}
+ r.Method = strings.ToUpper(override)
}
var pathComponents []string
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
index 8f90d15a56..e54507145b 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -52,13 +52,13 @@ type Pattern struct {
// It returns an error if the given definition is invalid.
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
if version != 1 {
- grpclog.Infof("unsupported version: %d", version)
+ grpclog.Errorf("unsupported version: %d", version)
return Pattern{}, ErrInvalidPattern
}
l := len(ops)
if l%2 != 0 {
- grpclog.Infof("odd number of ops codes: %d", l)
+ grpclog.Errorf("odd number of ops codes: %d", l)
return Pattern{}, ErrInvalidPattern
}
@@ -81,14 +81,14 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er
stack++
case utilities.OpPushM:
if pushMSeen {
- grpclog.Infof("pushM appears twice")
+ grpclog.Error("pushM appears twice")
return Pattern{}, ErrInvalidPattern
}
pushMSeen = true
stack++
case utilities.OpLitPush:
if op.operand < 0 || len(pool) <= op.operand {
- grpclog.Infof("negative literal index: %d", op.operand)
+ grpclog.Errorf("negative literal index: %d", op.operand)
return Pattern{}, ErrInvalidPattern
}
if pushMSeen {
@@ -97,18 +97,18 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er
stack++
case utilities.OpConcatN:
if op.operand <= 0 {
- grpclog.Infof("negative concat size: %d", op.operand)
+ grpclog.Errorf("negative concat size: %d", op.operand)
return Pattern{}, ErrInvalidPattern
}
stack -= op.operand
if stack < 0 {
- grpclog.Info("stack underflow")
+ grpclog.Error("stack underflow")
return Pattern{}, ErrInvalidPattern
}
stack++
case utilities.OpCapture:
if op.operand < 0 || len(pool) <= op.operand {
- grpclog.Infof("variable name index out of bound: %d", op.operand)
+ grpclog.Errorf("variable name index out of bound: %d", op.operand)
return Pattern{}, ErrInvalidPattern
}
v := pool[op.operand]
@@ -116,11 +116,11 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er
vars = append(vars, v)
stack--
if stack < 0 {
- grpclog.Infof("stack underflow")
+ grpclog.Error("stack underflow")
return Pattern{}, ErrInvalidPattern
}
default:
- grpclog.Infof("invalid opcode: %d", op.code)
+ grpclog.Errorf("invalid opcode: %d", op.code)
return Pattern{}, ErrInvalidPattern
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
index d01933c4fd..fe634174b8 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -51,11 +51,13 @@ func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *u
key = match[1]
values = append([]string{match[2]}, values...)
}
- fieldPath := strings.Split(key, ".")
+
+ msgValue := msg.ProtoReflect()
+ fieldPath := normalizeFieldPath(msgValue, strings.Split(key, "."))
if filter.HasCommonPrefix(fieldPath) {
continue
}
- if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
+ if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil {
return err
}
}
@@ -68,6 +70,38 @@ func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value stri
return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
}
+func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string {
+ newFieldPath := make([]string, 0, len(fieldPath))
+ for i, fieldName := range fieldPath {
+ fields := msgValue.Descriptor().Fields()
+ fieldDesc := fields.ByTextName(fieldName)
+ if fieldDesc == nil {
+ fieldDesc = fields.ByJSONName(fieldName)
+ }
+ if fieldDesc == nil {
+ // return initial field path values if no matching message field was found
+ return fieldPath
+ }
+
+ newFieldPath = append(newFieldPath, string(fieldDesc.Name()))
+
+ // If this is the last element, we're done
+ if i == len(fieldPath)-1 {
+ break
+ }
+
+ // Only singular message fields are allowed
+ if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated {
+ return fieldPath
+ }
+
+ // Get the nested message
+ msgValue = msgValue.Get(fieldDesc).Message()
+ }
+
+ return newFieldPath
+}
+
func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
if len(fieldPath) < 1 {
return errors.New("no field path")
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
index f62c0c5a1b..5c23ae8b76 100644
--- a/vendor/github.com/hashicorp/consul/api/api.go
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -1129,6 +1129,23 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*
return wm, nil
}
+// delete is used to do a DELETE request against an endpoint
+func (c *Client) delete(endpoint string, q *QueryOptions) (*WriteMeta, error) {
+ r := c.newRequest("DELETE", endpoint)
+ r.setQueryOptions(q)
+ rtt, resp, err := c.doRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer closeResponseBody(resp)
+ if err = requireHttpCodes(resp, 204, 200); err != nil {
+ return nil, err
+ }
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
// parseQueryMeta is used to help parse query meta-data
//
// TODO(rb): bug? the error from this function is never handled
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go
index baf274e2da..ba2bac19ef 100644
--- a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go
+++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go
@@ -195,6 +195,9 @@ type TerminatingGatewayConfigEntry struct {
type LinkedService struct {
// Referencing other partitions is not supported.
+ // DisableAutoHostRewrite disables the terminating gateway's auto host rewrite feature when set to true.
+ DisableAutoHostRewrite bool `json:",omitempty"`
+
// Namespace is where the service is registered.
Namespace string `json:",omitempty"`
diff --git a/vendor/github.com/hashicorp/consul/api/partition.go b/vendor/github.com/hashicorp/consul/api/partition.go
index 8467c31189..8a9bfb482f 100644
--- a/vendor/github.com/hashicorp/consul/api/partition.go
+++ b/vendor/github.com/hashicorp/consul/api/partition.go
@@ -27,6 +27,9 @@ type Partition struct {
// ModifyIndex is the latest Raft index at which the Partition was modified.
ModifyIndex uint64 `json:"ModifyIndex,omitempty"`
+
+ // DisableGossip will not enable a gossip pool for the partition
+ DisableGossip bool `json:"DisableGossip,omitempty"`
}
// PartitionDefaultName is the default partition value.
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
index 639513d29f..7fb9c390c9 100644
--- a/vendor/github.com/hashicorp/consul/api/raw.go
+++ b/vendor/github.com/hashicorp/consul/api/raw.go
@@ -25,3 +25,8 @@ func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*Query
func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
return raw.c.write(endpoint, in, out, q)
}
+
+// Delete is used to do a DELETE request against an endpoint
+func (raw *Raw) Delete(endpoint string, q *QueryOptions) (*WriteMeta, error) {
+ return raw.c.delete(endpoint, q)
+}
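
Raw.Delete wires the new unexported client.delete helper through to arbitrary endpoints, accepting either a 200 or 204 response. A minimal sketch (the endpoint path is only an example):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Raw().Delete issues a DELETE against any endpoint; the KV path below is
	// just an example of an endpoint that answers DELETE with a 200.
	wm, err := client.Raw().Delete("/v1/kv/example-key", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("request took", wm.RequestTime)
}
```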
diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md
index 5f16dd140c..6d48174bfb 100644
--- a/vendor/github.com/hashicorp/go-version/CHANGELOG.md
+++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md
@@ -1,3 +1,22 @@
+# 1.7.0 (May 24, 2024)
+
+ENHANCEMENTS:
+
+- Remove `reflect` dependency ([#91](https://github.com/hashicorp/go-version/pull/91))
+- Implement the `database/sql.Scanner` and `database/sql/driver.Value` interfaces for `Version` ([#133](https://github.com/hashicorp/go-version/pull/133))
+
+INTERNAL:
+
+- [COMPLIANCE] Add Copyright and License Headers ([#115](https://github.com/hashicorp/go-version/pull/115))
+- [COMPLIANCE] Update MPL-2.0 LICENSE ([#105](https://github.com/hashicorp/go-version/pull/105))
+- Bump actions/cache from 3.0.11 to 3.2.5 ([#116](https://github.com/hashicorp/go-version/pull/116))
+- Bump actions/checkout from 3.2.0 to 3.3.0 ([#111](https://github.com/hashicorp/go-version/pull/111))
+- Bump actions/upload-artifact from 3.1.1 to 3.1.2 ([#112](https://github.com/hashicorp/go-version/pull/112))
+- GHA Migration ([#103](https://github.com/hashicorp/go-version/pull/103))
+- github: Pin external GitHub Actions to hashes ([#107](https://github.com/hashicorp/go-version/pull/107))
+- SEC-090: Automated trusted workflow pinning (2023-04-05) ([#124](https://github.com/hashicorp/go-version/pull/124))
+- update readme ([#104](https://github.com/hashicorp/go-version/pull/104))
+
# 1.6.0 (June 28, 2022)
FEATURES:
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
index c33dcc7c92..1409d6ab92 100644
--- a/vendor/github.com/hashicorp/go-version/LICENSE
+++ b/vendor/github.com/hashicorp/go-version/LICENSE
@@ -1,3 +1,5 @@
+Copyright (c) 2014 HashiCorp, Inc.
+
Mozilla Public License, version 2.0
1. Definitions
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
index 4d25050903..4b7806cd96 100644
--- a/vendor/github.com/hashicorp/go-version/README.md
+++ b/vendor/github.com/hashicorp/go-version/README.md
@@ -1,5 +1,5 @@
# Versioning Library for Go
-[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main)
+![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg)
[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version)
go-version is a library for parsing versions and version constraints,
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
index da5d1aca14..29bdc4d2b5 100644
--- a/vendor/github.com/hashicorp/go-version/constraint.go
+++ b/vendor/github.com/hashicorp/go-version/constraint.go
@@ -1,8 +1,10 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
package version
import (
"fmt"
- "reflect"
"regexp"
"sort"
"strings"
@@ -199,7 +201,7 @@ func prereleaseCheck(v, c *Version) bool {
case cPre && vPre:
// A constraint with a pre-release can only match a pre-release version
// with the same base segments.
- return reflect.DeepEqual(c.Segments64(), v.Segments64())
+ return v.equalSegments(c)
case !cPre && vPre:
// A constraint without a pre-release can only match a version without a
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
index e87df69906..7c683c2813 100644
--- a/vendor/github.com/hashicorp/go-version/version.go
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -1,9 +1,12 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
package version
import (
"bytes"
+ "database/sql/driver"
"fmt"
- "reflect"
"regexp"
"strconv"
"strings"
@@ -117,11 +120,8 @@ func (v *Version) Compare(other *Version) int {
return 0
}
- segmentsSelf := v.Segments64()
- segmentsOther := other.Segments64()
-
// If the segments are the same, we must compare on prerelease info
- if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+ if v.equalSegments(other) {
preSelf := v.Prerelease()
preOther := other.Prerelease()
if preSelf == "" && preOther == "" {
@@ -137,6 +137,8 @@ func (v *Version) Compare(other *Version) int {
return comparePrereleases(preSelf, preOther)
}
+ segmentsSelf := v.Segments64()
+ segmentsOther := other.Segments64()
// Get the highest specificity (hS), or if they're equal, just use segmentSelf length
lenSelf := len(segmentsSelf)
lenOther := len(segmentsOther)
@@ -160,7 +162,7 @@ func (v *Version) Compare(other *Version) int {
// this means Other had the lower specificity
// Check to see if the remaining segments in Self are all zeros -
if !allZero(segmentsSelf[i:]) {
- //if not, it means that Self has to be greater than Other
+ // if not, it means that Self has to be greater than Other
return 1
}
break
@@ -180,6 +182,21 @@ func (v *Version) Compare(other *Version) int {
return 0
}
+func (v *Version) equalSegments(other *Version) bool {
+ segmentsSelf := v.Segments64()
+ segmentsOther := other.Segments64()
+
+ if len(segmentsSelf) != len(segmentsOther) {
+ return false
+ }
+ for i, v := range segmentsSelf {
+ if v != segmentsOther[i] {
+ return false
+ }
+ }
+ return true
+}
+
func allZero(segs []int64) bool {
for _, s := range segs {
if s != 0 {
@@ -405,3 +422,20 @@ func (v *Version) UnmarshalText(b []byte) error {
func (v *Version) MarshalText() ([]byte, error) {
return []byte(v.String()), nil
}
+
+// Scan implements the sql.Scanner interface.
+func (v *Version) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case string:
+ return v.UnmarshalText([]byte(src))
+ case nil:
+ return nil
+ default:
+ return fmt.Errorf("cannot scan %T as Version", src)
+ }
+}
+
+// Value implements the driver.Valuer interface.
+func (v *Version) Value() (driver.Value, error) {
+ return v.String(), nil
+}
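Not part of the diff: a minimal sketch of how the changed go-version surface is used, assuming only the exported API visible above and upstream (NewVersion, NewConstraint, Compare, Constraints.Check, plus the new Scan/Value methods).

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	beta, _ := version.NewVersion("1.2.0-beta")
	release, _ := version.NewVersion("1.2.0")

	// Same base segments, so Compare now goes through equalSegments and then
	// falls back to the prerelease rules: a prerelease sorts before the release.
	fmt.Println(beta.Compare(release)) // -1

	// Per prereleaseCheck above, a constraint carrying a prerelease only
	// matches prereleases with the same base segments.
	c, _ := version.NewConstraint(">= 1.2.0-alpha")
	fmt.Println(c.Check(beta)) // true

	// The new Scan/Value methods let *Version round-trip through database/sql.
	var v version.Version
	if err := v.Scan("1.2.0"); err != nil {
		panic(err)
	}
	dbVal, _ := v.Value() // driver.Value holding the string "1.2.0"
	fmt.Println(dbVal)
}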
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
index cc888d43e6..83547fe13d 100644
--- a/vendor/github.com/hashicorp/go-version/version_collection.go
+++ b/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
package version
// Collection is a type that implements the sort.Interface interface
diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore
new file mode 100644
index 0000000000..15586a2b54
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.gitignore
@@ -0,0 +1,9 @@
+y.output
+
+# ignore intellij files
+.idea
+*.iml
+*.ipr
+*.iws
+
+*.test
diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml
new file mode 100644
index 0000000000..cb63a32161
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.travis.yml
@@ -0,0 +1,13 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.x
+ - tip
+
+branches:
+ only:
+ - master
+
+script: make test
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 0000000000..c33dcc7c92
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 0000000000..84fd743f5c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+ go fmt ./...
+
+test: generate
+ go get -t ./...
+ go test $(TEST) $(TESTARGS)
+
+generate:
+ go generate ./...
+
+updatedeps:
+ go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 0000000000..c8223326dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and others similar.
+
+## Why?
+
+A common question when viewing HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn some set of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+ * Single line comments start with `#` or `//`
+
+ * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+ are not allowed. A multi-line comment (also known as a block comment)
+ terminates at the first `*/` found.
+
+ * Values are assigned with the syntax `key = value` (whitespace doesn't
+ matter). The value can be any primitive: a string, number, boolean,
+ object, or list.
+
+ * Strings are double-quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+  * Multi-line strings start with `<<EOF` at the end of a line, and end
+    with a line consisting of only `EOF`. Any text may be used in place
+    of `EOF`.
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -t ./...
+
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 0000000000..bed9ebbe14
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,729 @@
+package hcl
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// This is the tag to use with structures to have settings for HCL
+const tagName = "hcl"
+
+var (
+ // nodeType holds a reference to the type of ast.Node
+ nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+ root, err := parse(bs)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+ obj, err := Parse(in)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+ val := reflect.ValueOf(out)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("result must be a pointer")
+ }
+
+ // If we have the file, we really decode the root node
+ if f, ok := n.(*ast.File); ok {
+ n = f.Node
+ }
+
+ var d decoder
+ return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+ stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+ k := result
+
+ // If we have an interface with a valid value, we use that
+ // for the check.
+ if result.Kind() == reflect.Interface {
+ elem := result.Elem()
+ if elem.IsValid() {
+ k = elem
+ }
+ }
+
+ // Push current onto stack unless it is an interface.
+ if k.Kind() != reflect.Interface {
+ d.stack = append(d.stack, k.Kind())
+
+ // Schedule a pop
+ defer func() {
+ d.stack = d.stack[:len(d.stack)-1]
+ }()
+ }
+
+ switch k.Kind() {
+ case reflect.Bool:
+ return d.decodeBool(name, node, result)
+ case reflect.Float32, reflect.Float64:
+ return d.decodeFloat(name, node, result)
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ return d.decodeInt(name, node, result)
+ case reflect.Interface:
+ // When we see an interface, we make our own thing
+ return d.decodeInterface(name, node, result)
+ case reflect.Map:
+ return d.decodeMap(name, node, result)
+ case reflect.Ptr:
+ return d.decodePtr(name, node, result)
+ case reflect.Slice:
+ return d.decodeSlice(name, node, result)
+ case reflect.String:
+ return d.decodeString(name, node, result)
+ case reflect.Struct:
+ return d.decodeStruct(name, node, result)
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+ }
+ }
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.BOOL {
+ v, err := strconv.ParseBool(n.Token.Text)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
+ v, err := strconv.ParseFloat(n.Token.Text, 64)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ case token.STRING:
+ v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+ // When we see an ast.Node, we retain the value to enable deferred decoding.
+ // Very useful in situations where we want to preserve ast.Node information
+ // like Pos
+ if result.Type() == nodeType && result.CanSet() {
+ result.Set(reflect.ValueOf(node))
+ return nil
+ }
+
+ var set reflect.Value
+ redecode := true
+
+ // For testing types, ObjectType should just be treated as a list. We
+ // set this to a temporary var because we want to pass in the real node.
+ testNode := node
+ if ot, ok := node.(*ast.ObjectType); ok {
+ testNode = ot.List
+ }
+
+ switch n := testNode.(type) {
+ case *ast.ObjectList:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+ set = result
+ }
+ case *ast.ObjectType:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+ set = result
+ }
+ case *ast.ListType:
+ var temp []interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+ set = result
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.BOOL:
+ var result bool
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.FLOAT:
+ var result float64
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.NUMBER:
+ var result int
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.STRING, token.HEREDOC:
+ set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+ }
+ }
+ default:
+ return fmt.Errorf(
+ "%s: cannot decode into interface: %T",
+ name, node)
+ }
+
+	// Set the result to what it's supposed to be, then reset
+	// result so we don't reflect into this method anymore.
+ result.Set(set)
+
+ if redecode {
+ // Revisit the node so that we can use the newly instantiated
+ // thing and populate it.
+ if err := d.decode(name, node, result); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+ if item, ok := node.(*ast.ObjectItem); ok {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ n, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+ }
+ }
+
+ // If we have an interface, then we can address the interface,
+	// but not the map itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ resultKeyType := resultType.Key()
+ if resultKeyType.Kind() != reflect.String {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Make a map if it is nil
+ resultMap := result
+ if result.IsNil() {
+ resultMap = reflect.MakeMap(
+ reflect.MapOf(resultKeyType, resultElemType))
+ }
+
+ // Go through each element and decode it.
+ done := make(map[string]struct{})
+ for _, item := range n.Items {
+ if item.Val == nil {
+ continue
+ }
+
+ // github.com/hashicorp/terraform/issue/5740
+ if len(item.Keys) == 0 {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Get the key we're dealing with, which is the first item
+ keyStr := item.Keys[0].Token.Value().(string)
+
+ // If we've already processed this key, then ignore it
+ if _, ok := done[keyStr]; ok {
+ continue
+ }
+
+ // Determine the value. If we have more than one key, then we
+ // get the objectlist of only these keys.
+ itemVal := item.Val
+ if len(item.Keys) > 1 {
+ itemVal = n.Filter(keyStr)
+ done[keyStr] = struct{}{}
+ }
+
+ // Make the field name
+ fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+ // Get the key/value as reflection values
+ key := reflect.ValueOf(keyStr)
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // If we have a pre-existing value in the map, use that
+ oldVal := resultMap.MapIndex(key)
+ if oldVal.IsValid() {
+ val.Set(oldVal)
+ }
+
+ // Decode!
+ if err := d.decode(fieldName, itemVal, val); err != nil {
+ return err
+ }
+
+ // Set the value on the map
+ resultMap.SetMapIndex(key, val)
+ }
+
+ // Set the final map if we can
+ set.Set(resultMap)
+ return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ val := reflect.New(resultElemType)
+ if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+ return err
+ }
+
+ result.Set(val)
+ return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+	// Create the slice if it is nil
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ if result.IsNil() {
+ resultSliceType := reflect.SliceOf(resultElemType)
+ result = reflect.MakeSlice(
+ resultSliceType, 0, 0)
+ }
+
+ // Figure out the items we'll be copying into the slice
+ var items []ast.Node
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ items = make([]ast.Node, len(n.Items))
+ for i, item := range n.Items {
+ items[i] = item
+ }
+ case *ast.ObjectType:
+ items = []ast.Node{n}
+ case *ast.ListType:
+ items = n.List
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("unknown slice type: %T", node),
+ }
+ }
+
+ for i, item := range items {
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+ // Decode
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // if item is an object that was decoded from ambiguous JSON and
+ // flattened, make sure it's expanded if it needs to decode into a
+ // defined structure.
+ item := expandObject(item, val)
+
+ if err := d.decode(fieldName, item, val); err != nil {
+ return err
+ }
+
+ // Append it onto the slice
+ result = reflect.Append(result, val)
+ }
+
+ set.Set(result)
+ return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the ast to properly decode.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+ item, ok := node.(*ast.ObjectItem)
+ if !ok {
+ return node
+ }
+
+ elemType := result.Type()
+
+ // our target type must be a struct
+ switch elemType.Kind() {
+ case reflect.Ptr:
+ switch elemType.Elem().Kind() {
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+
+ // A list value will have a key and field name. If it had more fields,
+ // it wouldn't have been flattened.
+ if len(item.Keys) != 2 {
+ return node
+ }
+
+ keyToken := item.Keys[0].Token
+ item.Keys = item.Keys[1:]
+
+ // we need to un-flatten the ast enough to decode
+ newNode := &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{
+ &ast.ObjectKey{
+ Token: keyToken,
+ },
+ },
+ Val: &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{item},
+ },
+ },
+ }
+
+ return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+ return nil
+ case token.STRING, token.HEREDOC:
+ result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+ }
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+ var item *ast.ObjectItem
+ if it, ok := node.(*ast.ObjectItem); ok {
+ item = it
+ node = it.Val
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ // Handle the special case where the object itself is a literal. Previously
+ // the yacc parser would always ensure top-level elements were arrays. The new
+ // parser does not make the same guarantees, thus we need to convert any
+ // top-level literal elements into a list.
+ if _, ok := node.(*ast.LiteralType); ok && item != nil {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ list, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+ }
+ }
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = result
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+ fields := []field{}
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+ // Ignore fields with tag name "-"
+ if tagParts[0] == "-" {
+ continue
+ }
+
+ if fieldType.Anonymous {
+ fieldKind := fieldType.Type.Kind()
+ if fieldKind != reflect.Struct {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unsupported type to struct: %s",
+ fieldType.Name, fieldKind),
+ }
+ }
+
+ // We have an embedded field. We "squash" the fields down
+ // if specified in the tag.
+ squash := false
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+ }
+
+ if squash {
+ structs = append(
+ structs, result.FieldByName(fieldType.Name))
+ continue
+ }
+ }
+
+ // Normal struct field, store it away
+ fields = append(fields, field{fieldType, structVal.Field(i)})
+ }
+ }
+
+ usedKeys := make(map[string]struct{})
+ decodedFields := make([]string, 0, len(fields))
+ decodedFieldsVal := make([]reflect.Value, 0)
+ unusedKeysVal := make([]reflect.Value, 0)
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ if !fieldValue.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !fieldValue.CanSet() {
+ continue
+ }
+
+ fieldName := field.Name
+
+ tagValue := field.Tag.Get(tagName)
+ tagParts := strings.SplitN(tagValue, ",", 2)
+ if len(tagParts) >= 2 {
+ switch tagParts[1] {
+ case "decodedFields":
+ decodedFieldsVal = append(decodedFieldsVal, fieldValue)
+ continue
+ case "key":
+ if item == nil {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+ name, fieldName),
+ }
+ }
+
+ fieldValue.SetString(item.Keys[0].Token.Value().(string))
+ continue
+ case "unusedKeys":
+ unusedKeysVal = append(unusedKeysVal, fieldValue)
+ continue
+ }
+ }
+
+ if tagParts[0] != "" {
+ fieldName = tagParts[0]
+ }
+
+ // Determine the element we'll use to decode. If it is a single
+ // match (only object with the field), then we decode it exactly.
+ // If it is a prefix match, then we decode the matches.
+ filter := list.Filter(fieldName)
+
+ prefixMatches := filter.Children()
+ matches := filter.Elem()
+ if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+ continue
+ }
+
+ // Track the used key
+ usedKeys[fieldName] = struct{}{}
+
+ // Create the field name and decode. We range over the elements
+ // because we actually want the value.
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ if len(prefixMatches.Items) > 0 {
+ if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
+ return err
+ }
+ }
+ for _, match := range matches.Items {
+ var decodeNode ast.Node = match.Val
+ if ot, ok := decodeNode.(*ast.ObjectType); ok {
+ decodeNode = &ast.ObjectList{Items: ot.List.Items}
+ }
+
+ if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
+ return err
+ }
+ }
+
+ decodedFields = append(decodedFields, field.Name)
+ }
+
+ if len(decodedFieldsVal) > 0 {
+ // Sort it so that it is deterministic
+ sort.Strings(decodedFields)
+
+ for _, v := range decodedFieldsVal {
+ v.Set(reflect.ValueOf(decodedFields))
+ }
+ }
+
+ return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+ var nodeContainer struct {
+ Node ast.Node
+ }
+ value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+ return value.Type()
+}
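Not part of the vendored file: a small sketch of the decoder entry points defined above. The Config type, its fields, and the input document are invented for illustration; hcl.Decode and the `hcl` struct tag are the pieces taken from this file.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target type; the `hcl:"..."` tags correspond to
// tagName ("hcl"), which decodeStruct above reads via reflection.
type Config struct {
	Name    string   `hcl:"name"`
	Port    int      `hcl:"port"`
	Enabled bool     `hcl:"enabled"`
	Tags    []string `hcl:"tags"`
}

const input = `
# comments are allowed
name    = "web"
port    = 8080
enabled = true
tags    = ["a", "b"]
`

func main() {
	var cfg Config
	// Decode parses the HCL (or JSON) input and walks the result into &cfg
	// through decodeStruct, decodeSlice, decodeString, etc. above.
	if err := hcl.Decode(&cfg, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:web Port:8080 Enabled:true Tags:[a b]}
}

Unmarshal([]byte, v) and DecodeObject(out, node) above are the same machinery reached from a byte slice or an already-parsed ast.Node.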
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 0000000000..575a20b50b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// hcl input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
+package hcl
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 0000000000..6e5ef654bb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+ node()
+ Pos() token.Pos
+}
+
+func (File) node() {}
+func (ObjectList) node() {}
+func (ObjectKey) node() {}
+func (ObjectItem) node() {}
+func (Comment) node() {}
+func (CommentGroup) node() {}
+func (ObjectType) node() {}
+func (LiteralType) node() {}
+func (ListType) node() {}
+
+// File represents a single HCL file
+type File struct {
+ Node Node // usually a *ObjectList
+ Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+ return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+ Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+ o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contains ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ // If there aren't enough keys, then ignore this
+ if len(item.Keys) < len(keys) {
+ continue
+ }
+
+ match := true
+ for i, key := range item.Keys[:len(keys)] {
+ key := key.Token.Value().(string)
+ if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+ match = false
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+
+ // Strip off the prefix from the children
+ newItem := *item
+ newItem.Keys = newItem.Keys[len(keys):]
+ result.Add(&newItem)
+ }
+
+ return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) > 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) == 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item
+ return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested)
+type ObjectItem struct {
+ // keys is only one length long if it's of type assignment. If it's a
+ // nested object it can be larger than one. In that case "assign" is
+ // invalid as there is no assignments for a nested object.
+ Keys []*ObjectKey
+
+ // assign contains the position of "=", if any
+ Assign token.Pos
+
+	// val is the item itself. It can be an object, list, number, bool or a
+ // string. If key length is larger than one, val can be only of type
+ // Object.
+ Val Node
+
+ LeadComment *CommentGroup // associated lead comment
+ LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+ // I'm not entirely sure what causes this, but removing this causes
+ // a test failure. We should investigate at some point.
+ if len(o.Keys) == 0 {
+ return token.Pos{}
+ }
+
+ return o.Keys[0].Pos()
+}
+
+// ObjectKey is either an identifier or a string.
+type ObjectKey struct {
+ Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+ return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+ Token token.Token
+
+ // comment types, only used when in a list
+ LeadComment *CommentGroup
+ LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+ return l.Token.Pos
+}
+
+// ListType represents an HCL List type
+type ListType struct {
+ Lbrack token.Pos // position of "["
+ Rbrack token.Pos // position of "]"
+ List []Node // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+ return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+ l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL Object Type
+type ObjectType struct {
+ Lbrace token.Pos // position of "{"
+ Rbrace token.Pos // position of "}"
+ List *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+ return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /* style comment
+type Comment struct {
+ Start token.Pos // position of / or #
+ Text string
+}
+
+func (c *Comment) Pos() token.Pos {
+ return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+ List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+ return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
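Not part of the vendored file: a sketch of the raw-AST route that the package comment in hcl.go describes, using the ObjectList helpers above. The `service` blocks and the Port field are invented for illustration; parser.Parse, Filter, Children, and hcl.DecodeObject are the APIs shown in this diff.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

const src = `
service "web" {
  port = 80
}
service "api" {
  port = 81
}
`

func main() {
	file, err := parser.Parse([]byte(src))
	if err != nil {
		log.Fatal(err)
	}

	// The root node of a parsed file is usually an *ast.ObjectList.
	root := file.Node.(*ast.ObjectList)

	// Filter strips the "service" prefix, so each remaining item keeps only
	// its label key ("web", "api").
	services := root.Filter("service")

	// Children() keeps items that still have keys; Elem() would instead keep
	// bare assignments (key length == 0), of which there are none here.
	for _, item := range services.Children().Items {
		label := item.Keys[0].Token.Value().(string)

		var out struct {
			Port int `hcl:"port"`
		}
		// DecodeObject accepts any ast.Node, including a single ObjectItem.
		if err := hcl.DecodeObject(&out, item); err != nil {
			log.Fatal(err)
		}
		fmt.Println(label, out.Port) // web 80, then api 81
	}
}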
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 0000000000..ba07ad42b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the passed node to fn.
+func Walk(node Node, fn WalkFunc) Node {
+ rewritten, ok := fn(node)
+ if !ok {
+ return rewritten
+ }
+
+ switch n := node.(type) {
+ case *File:
+ n.Node = Walk(n.Node, fn)
+ case *ObjectList:
+ for i, item := range n.Items {
+ n.Items[i] = Walk(item, fn).(*ObjectItem)
+ }
+ case *ObjectKey:
+ // nothing to do
+ case *ObjectItem:
+ for i, k := range n.Keys {
+ n.Keys[i] = Walk(k, fn).(*ObjectKey)
+ }
+
+ if n.Val != nil {
+ n.Val = Walk(n.Val, fn)
+ }
+ case *LiteralType:
+ // nothing to do
+ case *ListType:
+ for i, l := range n.List {
+ n.List[i] = Walk(l, fn)
+ }
+ case *ObjectType:
+ n.List = Walk(n.List, fn).(*ObjectList)
+ default:
+ // should we panic here?
+ fmt.Printf("unknown type: %T\n", n)
+ }
+
+ fn(nil)
+ return rewritten
+}
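Not part of the vendored file: a sketch of Walk used read-only; the callback counts literal nodes and returns each node unchanged with true, so the tree is traversed but not rewritten.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	file, err := parser.Parse([]byte(`a = 1
b = "two"
c = [true, false]`))
	if err != nil {
		log.Fatal(err)
	}

	literals := 0
	ast.Walk(file, func(n ast.Node) (ast.Node, bool) {
		// Walk also calls fn(nil) after visiting a node's children; skip those.
		if n == nil {
			return n, false
		}
		if _, ok := n.(*ast.LiteralType); ok {
			literals++
		}
		// Returning the node unchanged plus true continues the walk; returning
		// a different node would rewrite that position in the AST.
		return n, true
	})
	fmt.Println(literals) // 4: 1, "two", true, false
}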
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 0000000000..5c99381dfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+ Pos token.Pos
+ Err error
+}
+
+func (e *PosError) Error() string {
+ return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 0000000000..64c83bcfb5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,532 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/scanner"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ // normalize all line endings
+ // since the scanner and output only work with "\n" line endings, we may
+ // end up with dangling "\r" characters in the parsed data.
+ src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+ }
+
+ f.Node, err = p.objectList(false)
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ f.Comments = p.comments
+ return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter "obj" tells us whether we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ if obj {
+ tok := p.scan()
+ p.unscan()
+ if tok.Type == token.RBRACE {
+ break
+ }
+ }
+
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+		// we don't return a nil node, because we might want to use already
+ // collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // object lists can be optionally comma-delimited e.g. when a list of maps
+ // is being expressed, so a comma is allowed here - it's simply consumed
+ tok := p.scan()
+ if tok.Type != token.COMMA {
+ p.unscan()
+ }
+ }
+ return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+ endline = p.tok.Pos.Line
+
+ // count the endline if it's multiline comment, ie starting with /*
+ if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.tok.Text); i++ {
+ if p.tok.Text[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+ p.tok = p.sc.Scan()
+ return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.tok.Pos.Line
+
+ for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{List: list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if len(keys) > 0 && err == errEofToken {
+ // We ignore eof token here since it is an error if we didn't
+ // receive a value (but we did receive a key) for the item.
+ err = nil
+ }
+ if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+ // This is a strange boolean statement, but what it means is:
+ // We have keys with no value, and we're likely in an object
+ // (since RBrace ends an object). For this, we set err to nil so
+ // we continue and get the error below of having the wrong value
+ // type.
+ err = nil
+
+ // Reset the token type so we don't think it completed fine. See
+ // objectType which uses p.tok.Type to check if we're done with
+ // the object.
+ p.tok.Type = token.EOF
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ if p.leadComment != nil {
+ o.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ switch p.tok.Type {
+ case token.ASSIGN:
+ o.Assign = p.tok.Pos
+ o.Val, err = p.object()
+ if err != nil {
+ return nil, err
+ }
+ case token.LBRACE:
+ o.Val, err = p.objectType()
+ if err != nil {
+ return nil, err
+ }
+ default:
+ keyStr := make([]string, 0, len(keys))
+ for _, k := range keys {
+ keyStr = append(keyStr, k.Token.Text)
+ }
+
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " ")),
+ }
+ }
+
+ // key=#comment
+ // val
+ if p.lineComment != nil {
+ o.LineComment, p.lineComment = p.lineComment, nil
+ }
+
+ // do a look-ahead for line comment
+ p.scan()
+ if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+ o.LineComment = p.lineComment
+ p.lineComment = nil
+ }
+ p.unscan()
+ return o, nil
+}
+
+// objectKey parses an object key and returns a ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ // It is very important to also return the keys here as well as
+ // the error. This is because we need to be able to tell if we
+ // did parse keys prior to finding the EOF, or if we just found
+ // a bare EOF.
+ return keys, errEofToken
+ case token.ASSIGN:
+ // assignment or object only, but not nested objects. this is not
+ // allowed: `foo bar = {}`
+ if keyCount > 1 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+ }
+ }
+
+ if keyCount == 0 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: errors.New("no object keys found!"),
+ }
+ }
+
+ return keys, nil
+ case token.LBRACE:
+ var err error
+
+ // If we have no keys, then it is a syntax error. i.e. {{}} is not
+ // allowed.
+ if len(keys) == 0 {
+ err = &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+ }
+ }
+
+ // object
+ return keys, err
+ case token.IDENT, token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{Token: p.tok})
+ case token.ILLEGAL:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("illegal character"),
+ }
+ default:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+ }
+ }
+ }
+}
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.COMMENT:
+ // implement comment
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("Unknown token: %+v", tok),
+ }
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{
+ Lbrace: p.tok.Pos,
+ }
+
+ l, err := p.objectList(true)
+
+ // if we hit RBRACE, we are good to go (means we parsed all Items), if it's
+ // not an RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ // No error, scan and expect the ending to be a brace
+ if tok := p.scan(); tok.Type != token.RBRACE {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+ }
+ }
+
+ o.List = l
+ o.Rbrace = p.tok.Pos // advanced via parseObjectList
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{
+ Lbrack: p.tok.Pos,
+ }
+
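+ // needComma records whether the previous element still has to be closed off
+ // by a ',' or ']' before another element may start.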
+ needComma := false
+ for {
+ tok := p.scan()
+ if needComma {
+ switch tok.Type {
+ case token.COMMA, token.RBRACK:
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error parsing list, expected comma or list end, got: %s",
+ tok.Type),
+ }
+ }
+ }
+ switch tok.Type {
+ case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ // If there is a lead comment, apply it
+ if p.leadComment != nil {
+ node.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ l.Add(node)
+ needComma = true
+ case token.COMMA:
+ // get next list item or we are at the end
+ // do a look-ahead for line comment
+ p.scan()
+ if p.lineComment != nil && len(l.List) > 0 {
+ lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+ if ok {
+ lit.LineComment = p.lineComment
+ l.List[len(l.List)-1] = lit
+ p.lineComment = nil
+ }
+ }
+ p.unscan()
+
+ needComma = false
+ continue
+ case token.LBRACE:
+ // Looks like a nested object, so parse it out
+ node, err := p.objectType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse object within list: %s", err),
+ }
+ }
+ l.Add(node)
+ needComma = true
+ case token.LBRACK:
+ node, err := p.listType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse list within list: %s", err),
+ }
+ }
+ l.Add(node)
+ case token.RBRACK:
+ // finished
+ l.Rbrack = p.tok.Pos
+ return l, nil
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+ }
+ }
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok,
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ // Otherwise read the next token from the scanner and save it to the buffer
+ // in case we unscan later.
+ prev := p.tok
+ p.tok = p.sc.Scan()
+
+ if p.tok.Type == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+ // p.tok.Pos.Line, prev.Pos.Line, endline)
+ if p.tok.Pos.Line == prev.Pos.Line {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup(0)
+ if p.tok.Pos.Line != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok.Type == token.COMMENT {
+ comment, endline = p.consumeCommentGroup(1)
+ }
+
+ if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+ switch p.tok.Type {
+ case token.RBRACE, token.RBRACK:
+ // Do not count for these cases
+ default:
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+
+ }
+
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
new file mode 100644
index 0000000000..7c038d12a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
@@ -0,0 +1,789 @@
+package printer
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+const (
+ blank = byte(' ')
+ newline = byte('\n')
+ tab = byte('\t')
+ infinity = 1 << 30 // offset or line
+)
+
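+// unindent is a sentinel marker: heredocIndent writes it at the start of
+// heredoc and multi-line string continuation lines so that the later unindent
+// pass can strip the marker together with any indentation added before it.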
+var (
+ unindent = []byte("\uE123") // in the private use space
+)
+
+type printer struct {
+ cfg Config
+ prev token.Pos
+
+ comments []*ast.CommentGroup // may be nil, contains all comments
+ standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
+
+ enableTrace bool
+ indentTrace int
+}
+
+type ByPosition []*ast.CommentGroup
+
+func (b ByPosition) Len() int { return len(b) }
+func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
+
+// collectComments collects all standalone comments, i.e. comments which are
+// not attached to any node as a lead or line comment.
+func (p *printer) collectComments(node ast.Node) {
+ // first collect all comments. This is already stored in
+ // ast.File.(comments)
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.File:
+ p.comments = t.Comments
+ return nn, false
+ }
+ return nn, true
+ })
+
+ standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
+ for _, c := range p.comments {
+ standaloneComments[c.Pos()] = c
+ }
+
+ // next remove all lead and line comments from the overall comment map.
+ // This will give us comments which are standalone, comments which are not
+ // assigned to any kind of node.
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.LiteralType:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ case *ast.ObjectItem:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ }
+
+ return nn, true
+ })
+
+ for _, c := range standaloneComments {
+ p.standaloneComments = append(p.standaloneComments, c)
+ }
+
+ sort.Sort(ByPosition(p.standaloneComments))
+}
+
+// output creates printable HCL output for the given node and returns it.
+func (p *printer) output(n interface{}) []byte {
+ var buf bytes.Buffer
+
+ switch t := n.(type) {
+ case *ast.File:
+ // File doesn't trace so we add the tracing here
+ defer un(trace(p, "File"))
+ return p.output(t.Node)
+ case *ast.ObjectList:
+ defer un(trace(p, "ObjectList"))
+
+ var index int
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is at "infinity"
+ var nextItem token.Pos
+ if index != len(t.Items) {
+ nextItem = t.Items[index].Pos()
+ } else {
+ nextItem = token.Pos{Offset: infinity, Line: infinity}
+ }
+
+ // Go through the standalone comments in the file and print out
+ // the comments that should be printed for this object item.
+ for _, c := range p.standaloneComments {
+ // Go through all the comments in the group. The group
+ // should be printed together, not separated by double newlines.
+ printed := false
+ newlinePrinted := false
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+ // If we hit the end, add newlines so we can print the comment.
+ // We don't do this if prev is invalid, which means we are at the
+ // beginning of the file, since the first comment should
+ // be at the first line.
+ if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
+ buf.Write([]byte{newline, newline})
+ newlinePrinted = true
+ }
+
+ // Write the actual comment.
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+
+ // Set printed to true to note that we printed something
+ printed = true
+ }
+ }
+
+ // If we're not at the last item, write a new line so
+ // that there is a newline separating this comment from
+ // the next object.
+ if printed && index != len(t.Items) {
+ buf.WriteByte(newline)
+ }
+ }
+
+ if index == len(t.Items) {
+ break
+ }
+
+ buf.Write(p.output(t.Items[index]))
+ if index != len(t.Items)-1 {
+ // Always write a newline to separate us from the next item
+ buf.WriteByte(newline)
+
+ // Need to determine if we're going to separate the next item
+ // with a blank line. The logic here is simple, though there
+ // are a few conditions:
+ //
+ // 1. The next object is more than one line away anyways,
+ // so we need an empty line.
+ //
+ // 2. The next object is not a "single line" object, so
+ // we need an empty line.
+ //
+ // 3. This current object is not a single line object,
+ // so we need an empty line.
+ current := t.Items[index]
+ next := t.Items[index+1]
+ if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+ !p.isSingleLineObject(next) ||
+ !p.isSingleLineObject(current) {
+ buf.WriteByte(newline)
+ }
+ }
+ index++
+ }
+ case *ast.ObjectKey:
+ buf.WriteString(t.Token.Text)
+ case *ast.ObjectItem:
+ p.prev = t.Pos()
+ buf.Write(p.objectItem(t))
+ case *ast.LiteralType:
+ buf.Write(p.literalType(t))
+ case *ast.ListType:
+ buf.Write(p.list(t))
+ case *ast.ObjectType:
+ buf.Write(p.objectType(t))
+ default:
+ fmt.Printf(" unknown type: %T\n", n)
+ }
+
+ return buf.Bytes()
+}
+
+func (p *printer) literalType(lit *ast.LiteralType) []byte {
+ result := []byte(lit.Token.Text)
+ switch lit.Token.Type {
+ case token.HEREDOC:
+ // Clear the trailing newline from heredocs
+ if result[len(result)-1] == '\n' {
+ result = result[:len(result)-1]
+ }
+
+ // Poison lines 2+ so that we don't indent them
+ result = p.heredocIndent(result)
+ case token.STRING:
+ // If this is a multiline string, poison lines 2+ so we don't
+ // indent them.
+ if bytes.IndexRune(result, '\n') >= 0 {
+ result = p.heredocIndent(result)
+ }
+ }
+
+ return result
+}
+
+// objectItem returns the printable HCL form of an object item. An object item
+// starts with one or multiple keys and has a value. The value might be of any
+// type.
+func (p *printer) objectItem(o *ast.ObjectItem) []byte {
+ defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
+ var buf bytes.Buffer
+
+ if o.LeadComment != nil {
+ for _, comment := range o.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ // If key and val are on different lines, treat line comments like lead comments.
+ if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range o.Keys {
+ buf.WriteString(k.Token.Text)
+ buf.WriteByte(blank)
+
+ // reach end of key
+ if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ buf.Write(p.output(o.Val))
+
+ if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
+ buf.WriteByte(blank)
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// objectType returns the printable HCL form of an object type. An object type
+// begins with a brace and ends with a brace.
+func (p *printer) objectType(o *ast.ObjectType) []byte {
+ defer un(trace(p, "ObjectType"))
+ var buf bytes.Buffer
+ buf.WriteString("{")
+
+ var index int
+ var nextItem token.Pos
+ var commented, newlinePrinted bool
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is the closing brace
+ if index != len(o.List.Items) {
+ nextItem = o.List.Items[index].Pos()
+ } else {
+ nextItem = o.Rbrace
+ }
+
+ // Go through the standalone comments in the file and print out
+ // the comments that should be printed for this object item.
+ for _, c := range p.standaloneComments {
+ printed := false
+ var lastCommentPos token.Pos
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+ // If there are standalone comments and the initial newline has not
+ // been printed yet, do it now.
+ if !newlinePrinted {
+ newlinePrinted = true
+ buf.WriteByte(newline)
+ }
+
+ // add newline if it's between other printed nodes
+ if index > 0 {
+ commented = true
+ buf.WriteByte(newline)
+ }
+
+ // Store this position
+ lastCommentPos = comment.Pos()
+
+ // output the comment itself
+ buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+ // Set printed to true to note that we printed something
+ printed = true
+
+ /*
+ if index != len(o.List.Items) {
+ buf.WriteByte(newline) // do not print on the end
+ }
+ */
+ }
+ }
+
+ // Stuff to do if we had comments
+ if printed {
+ // Always write a newline
+ buf.WriteByte(newline)
+
+ // If there is another item in the object and our comment
+ // didn't hug it directly, then make sure there is a blank
+ // line separating them.
+ if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
+ buf.WriteByte(newline)
+ }
+ }
+ }
+
+ if index == len(o.List.Items) {
+ p.prev = o.Rbrace
+ break
+ }
+
+ // At this point we are sure that it's not a totally empty block: print
+ // the initial newline if it hasn't been printed yet by the previous
+ // block about standalone comments.
+ if !newlinePrinted {
+ buf.WriteByte(newline)
+ newlinePrinted = true
+ }
+
+ // check if we have adjacent one-liner items. If yes, we're going to align
+ // the comments.
+ var aligned []*ast.ObjectItem
+ for _, item := range o.List.Items[index:] {
+ // we don't group one line lists
+ if len(o.List.Items) == 1 {
+ break
+ }
+
+ // one means a one-liner without any lead comment
+ // two means a one-liner with a lead comment
+ // anything else might be something else
+ cur := lines(string(p.objectItem(item)))
+ if cur > 2 {
+ break
+ }
+
+ curPos := item.Pos()
+
+ nextPos := token.Pos{}
+ if index != len(o.List.Items)-1 {
+ nextPos = o.List.Items[index+1].Pos()
+ }
+
+ prevPos := token.Pos{}
+ if index != 0 {
+ prevPos = o.List.Items[index-1].Pos()
+ }
+
+ // fmt.Println("DEBUG ----------------")
+ // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+ // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+ // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
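+ // Group this item into the aligned run when it sits on a line directly
+ // adjacent to its neighbor; otherwise stop collecting.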
+ if curPos.Line+1 == nextPos.Line {
+ aligned = append(aligned, item)
+ index++
+ continue
+ }
+
+ if curPos.Line-1 == prevPos.Line {
+ aligned = append(aligned, item)
+ index++
+
+ // finish if we have a new line or comment next. This happens
+ // if the next item is not adjacent
+ if curPos.Line+1 != nextPos.Line {
+ break
+ }
+ continue
+ }
+
+ break
+ }
+
+ // put newlines if the items are between other non aligned items.
+ // newlines are also added if there is a standalone comment already, so
+ // check it too
+ if !commented && index != len(aligned) {
+ buf.WriteByte(newline)
+ }
+
+ if len(aligned) >= 1 {
+ p.prev = aligned[len(aligned)-1].Pos()
+
+ items := p.alignedItems(aligned)
+ buf.Write(p.indent(items))
+ } else {
+ p.prev = o.List.Items[index].Pos()
+
+ buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+ index++
+ }
+
+ buf.WriteByte(newline)
+ }
+
+ buf.WriteString("}")
+ return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+ var buf bytes.Buffer
+
+ // find the longest key and value length, needed for alignment
+ var longestKeyLen int // longest key length
+ var longestValLen int // longest value length
+ for _, item := range items {
+ key := len(item.Keys[0].Token.Text)
+ val := len(p.output(item.Val))
+
+ if key > longestKeyLen {
+ longestKeyLen = key
+ }
+
+ if val > longestValLen {
+ longestValLen = val
+ }
+ }
+
+ for i, item := range items {
+ if item.LeadComment != nil {
+ for _, comment := range item.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range item.Keys {
+ keyLen := len(k.Token.Text)
+ buf.WriteString(k.Token.Text)
+ for i := 0; i < longestKeyLen-keyLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ // reach end of key
+ if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ val := p.output(item.Val)
+ valLen := len(val)
+ buf.Write(val)
+
+ if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+ for i := 0; i < longestValLen-valLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range item.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ // do not print for the last item
+ if i != len(items)-1 {
+ buf.WriteByte(newline)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+ if p.isSingleLineList(l) {
+ return p.singleLineList(l)
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString("[")
+ buf.WriteByte(newline)
+
+ var longestLine int
+ for _, item := range l.List {
+ // for now we assume that the list only contains literal types
+ if lit, ok := item.(*ast.LiteralType); ok {
+ lineLen := len(lit.Token.Text)
+ if lineLen > longestLine {
+ longestLine = lineLen
+ }
+ }
+ }
+
+ haveEmptyLine := false
+ for i, item := range l.List {
+ // If we have a lead comment, then we want to write that first
+ leadComment := false
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+ leadComment = true
+
+ // Ensure an empty line before every element with a
+ // lead comment (except the first item in a list).
+ if !haveEmptyLine && i != 0 {
+ buf.WriteByte(newline)
+ }
+
+ for _, comment := range lit.LeadComment.List {
+ buf.Write(p.indent([]byte(comment.Text)))
+ buf.WriteByte(newline)
+ }
+ }
+
+ // also indent each line
+ val := p.output(item)
+ curLen := len(val)
+ buf.Write(p.indent(val))
+
+ // if this item is a heredoc, then we output the comma on
+ // the next line. This is the only case this happens.
+ comma := []byte{','}
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ comma = p.indent(comma)
+ }
+
+ buf.Write(comma)
+
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+ // if the next item doesn't have any comments, do not align
+ buf.WriteByte(blank) // align one space
+ for i := 0; i < longestLine-curLen; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range lit.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ buf.WriteByte(newline)
+
+ // Ensure an empty line after every element with a
+ // lead comment (except the first item in a list).
+ haveEmptyLine = leadComment && i != len(l.List)-1
+ if haveEmptyLine {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// isSingleLineList returns true if:
+// * the list was previously formatted entirely on one line
+// * it consists entirely of literals
+// * there are either no heredoc strings or the list has exactly one element
+// * there are no line comments
+func (printer) isSingleLineList(l *ast.ListType) bool {
+ for _, item := range l.List {
+ if item.Pos().Line != l.Lbrack.Line {
+ return false
+ }
+
+ lit, ok := item.(*ast.LiteralType)
+ if !ok {
+ return false
+ }
+
+ if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
+ return false
+ }
+
+ if lit.LineComment != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// singleLineList prints a simple single line list.
+// For a definition of "simple", see isSingleLineList above.
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+ buf := &bytes.Buffer{}
+
+ buf.WriteString("[")
+ for i, item := range l.List {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+
+ // Output the item itself
+ buf.Write(p.output(item))
+
+ // The heredoc marker needs to be at the end of line.
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// indent indents the lines of the given buffer for each non-empty line
+func (p *printer) indent(buf []byte) []byte {
+ var prefix []byte
+ if p.cfg.SpacesWidth != 0 {
+ for i := 0; i < p.cfg.SpacesWidth; i++ {
+ prefix = append(prefix, blank)
+ }
+ } else {
+ prefix = []byte{tab}
+ }
+
+ var res []byte
+ bol := true
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+ var res []byte
+ for i := 0; i < len(buf); i++ {
+ skip := len(buf)-i <= len(unindent)
+ if !skip {
+ skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+ }
+ if skip {
+ res = append(res, buf[i])
+ continue
+ }
+
+ // We have a marker. We have to backtrack here and clean out
+ // any whitespace ahead of our tombstone up to a \n
+ for j := len(res) - 1; j >= 0; j-- {
+ if res[j] == '\n' {
+ break
+ }
+
+ res = res[:j]
+ }
+
+ // Skip the entire unindent marker
+ i += len(unindent) - 1
+ }
+
+ return res
+}
+
+// heredocIndent marks all the 2nd and further lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+ var res []byte
+ bol := false
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, unindent...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// isSingleLineObject tells whether the given object item is a single
+// line object such as "obj {}".
+//
+// A single line object:
+//
+// * has no lead comments (lead comments force multiple lines)
+// * has no assignment
+// * has no values in the stanza (within {})
+//
+func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
+ // If there is a lead comment, can't be one line
+ if val.LeadComment != nil {
+ return false
+ }
+
+ // If there is assignment, we always break by line
+ if val.Assign.IsValid() {
+ return false
+ }
+
+ // If it isn't an object type, then it's not a single line object
+ ot, ok := val.Val.(*ast.ObjectType)
+ if !ok {
+ return false
+ }
+
+ // If the object has no items, it is single line!
+ return len(ot.List.Items) == 0
+}
+
+func lines(txt string) int {
+ endline := 1
+ for i := 0; i < len(txt); i++ {
+ if txt[i] == '\n' {
+ endline++
+ }
+ }
+ return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ i := 2 * p.indentTrace
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+ p.printTrace(msg, "(")
+ p.indentTrace++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+ p.indentTrace--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
new file mode 100644
index 0000000000..6617ab8e7a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
@@ -0,0 +1,66 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+ "bytes"
+ "io"
+ "text/tabwriter"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+)
+
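+// DefaultConfig is the Config used by Fprint and Format when no explicit
+// configuration is given; it indents with two spaces.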
+var DefaultConfig = Config{
+ SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+ SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+ p := &printer{
+ cfg: *c,
+ comments: make([]*ast.CommentGroup, 0),
+ standaloneComments: make([]*ast.CommentGroup, 0),
+ // enableTrace: true,
+ }
+
+ p.collectComments(node)
+
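+ // Print the AST and strip the internal unindent markers that were injected
+ // for heredoc and multi-line string bodies before writing the result.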
+ if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+ return err
+ }
+
+ // flush tabwriter, if any
+ var err error
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
+ }
+
+ return err
+}
+
+// Fprint "pretty-prints" an HCL node to output
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+ return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+ node, err := parser.Parse(src)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if err := DefaultConfig.Fprint(&buf, node); err != nil {
+ return nil, err
+ }
+
+ // Add trailing newline to result
+ buf.WriteString("\n")
+ return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644
index 0000000000..624a18fe3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -0,0 +1,652 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "regexp"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from an io.Reader compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to streaming
+ // read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. Returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == utf8.RuneError && size == 1 {
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ if ch == '\x00' {
+ s.err("unexpected null character (0x00)")
+ return eof
+ }
+
+ if ch == '\uE123' {
+ s.err("unicode code point U+E123 reserved for internal use")
+ return utf8.RuneError
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previous read Rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position: the initial next() moves the offset by one (by the size
+ // of the rune, actually), but we are interested in the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ tok = token.IDENT
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '#', '/':
+ tok = token.COMMENT
+ s.scanComment(ch)
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '<':
+ tok = token.HEREDOC
+ s.scanHeredoc()
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case '=':
+ tok = token.ASSIGN
+ case '+':
+ tok = token.ADD
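+ // '-' either starts a negative number literal or is a plain SUB operator;
+ // peek at the next rune to decide which.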
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ tok = token.SUB
+ }
+ default:
+ s.err("illegal char")
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+func (s *Scanner) scanComment(ch rune) {
+ // single line comments
+ if ch == '#' || (ch == '/' && s.peek() != '*') {
+ if ch == '/' && s.peek() != '/' {
+ s.err("expected '/' for comment")
+ return
+ }
+
+ ch = s.next()
+ for ch != '\n' && ch >= 0 && ch != eof {
+ ch = s.next()
+ }
+ if ch != eof && ch >= 0 {
+ s.unread()
+ }
+ return
+ }
+
+ // be sure we get the character after /*. This allows us to find comments
+ // that are not terminated
+ if ch == '/' {
+ s.next()
+ ch = s.next() // read character after "/*"
+ }
+
+ // look for /* - style comments
+ for {
+ if ch < 0 || ch == eof {
+ s.err("comment not terminated")
+ break
+ }
+
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ break
+ }
+ }
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ if ch == '0' {
+ // check for hexadecimal, octal or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal
+ ch = s.next()
+ found := false
+ for isHexadecimal(ch) {
+ ch = s.next()
+ found = true
+ }
+
+ if !found {
+ s.err("illegal hexadecimal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ return token.NUMBER
+ }
+
+ // now it's either something like: 0421(octal) or 0.1231(float)
+ illegalOctal := false
+ for isDecimal(ch) {
+ ch = s.next()
+ if ch == '8' || ch == '9' {
+ // this is just a possibility. For example 0159 is illegal, but
+ // 0159.23 is valid. So we mark a possible illegal octal. If
+ // the next character is not a period, we'll print the error.
+ illegalOctal = true
+ }
+ }
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if illegalOctal {
+ s.err("illegal octal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+ }
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+ // Scan the second '<' in example: '<<EOF'
+
+ if s.srcPos.Offset-lineStart >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+ break
+ }
+
+ // Not an anchor match, record the start of a new line
+ lineStart = s.srcPos.Offset
+ }
+
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+ }
+
+ return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' && braces == 0 {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for awhile
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example an
+// octal escape such as \123 corresponds to scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ start := n
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ if ch == eof {
+ // If we see an EOF, we halt any more scanning of digits
+ // immediately.
+ break
+ }
+
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ if n != start && ch != eof {
+ // we scanned all digits, put the last non digit char back,
+ // only if we read anything at all
+ s.unread()
+ }
+
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err reports a scanning error to the s.Error function. If the function is
+// not defined, the error is printed to os.Stderr by default.
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 0000000000..5f981eaa2f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes. (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+func Unquote(s string) (t string, err error) {
+ n := len(s)
+ if n < 2 {
+ return "", ErrSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", ErrSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote != '"' {
+ return "", ErrSyntax
+ }
+ if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+ return "", ErrSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+ switch quote {
+ case '"':
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ // If we're starting a '${}' then let it through un-unquoted.
+ // Specifically: we don't unquote any characters within the `${}`
+ // section.
+ if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+ buf = append(buf, '$', '{')
+ s = s[2:]
+
+ // Continue reading until we find the closing brace, copying as-is
+ braces := 1
+ for len(s) > 0 && braces > 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError {
+ return "", ErrSyntax
+ }
+
+ s = s[size:]
+
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+
+ switch r {
+ case '{':
+ braces++
+ case '}':
+ braces--
+ }
+ }
+ if braces != 0 {
+ return "", ErrSyntax
+ }
+ if len(s) == 0 {
+ // If there's no string left, we're done!
+ break
+ } else {
+ // If there's more left, we need to pop back up to the top of the loop
+ // in case there's another interpolation in this string.
+ continue
+ }
+ }
+
+ if s[0] == '\n' {
+ return "", ErrSyntax
+ }
+
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", ErrSyntax
+ }
+ }
+ return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
+
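+// unquoteChar decodes the first character or escape sequence in s and returns
+// it, a flag telling whether the value must be UTF-8 encoded (rather than
+// emitted as a single byte), and the remaining tail of the string.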
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"'):
+ err = ErrSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = ErrSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = ErrSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = ErrSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"':
+ if c != quote {
+ err = ErrSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = ErrSyntax
+ return
+ }
+ tail = s
+ return
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 0000000000..59c1bb72d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 0000000000..e37c0664ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+ JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+ COMMENT
+
+ identifier_beg
+ IDENT // literals
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ HEREDOC // <<FOO\nbar\nFOO
+
+ for len(frontier) > 0 {
+ // Pop the current item
+ n := len(frontier)
+ item := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ switch v := item.Val.(type) {
+ case *ast.ObjectType:
+ items, frontier = flattenObjectType(v, item, items, frontier)
+ case *ast.ListType:
+ items, frontier = flattenListType(v, item, items, frontier)
+ default:
+ items = append(items, item)
+ }
+ }
+
+ // Reverse the list since the frontier model runs things backwards
+ for i := len(items)/2 - 1; i >= 0; i-- {
+ opp := len(items) - 1 - i
+ items[i], items[opp] = items[opp], items[i]
+ }
+
+ // Done! Set the original items
+ list.Items = items
+ return n, true
+ })
+}
+
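+// flattenListType expands a list-valued item whose elements are all objects
+// into one frontier entry per element (reusing the item's keys); otherwise the
+// item is kept as-is.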
+func flattenListType(
+ ot *ast.ListType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list is empty, keep the original list
+ if len(ot.List) == 0 {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this list must also be objects!
+ for _, subitem := range ot.List {
+ if _, ok := subitem.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match; go through all the items and flatten
+ for _, elem := range ot.List {
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: item.Keys,
+ Assign: item.Assign,
+ Val: elem,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
+
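+// flattenObjectType expands an object-valued item whose nested items all have
+// object values into frontier entries, prepending the parent item's keys to
+// each; otherwise the item is kept as-is.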
+func flattenObjectType(
+ ot *ast.ObjectType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list has no items we do not have to flatten anything
+ if ot.List.Items == nil {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List.Items {
+ if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match; go through all the items and flatten
+ for _, subitem := range ot.List.Items {
+ // Copy the new key
+ keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+ copy(keys, item.Keys)
+ copy(keys[len(item.Keys):], subitem.Keys)
+
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: keys,
+ Assign: item.Assign,
+ Val: subitem.Val,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 0000000000..125a5f0729
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+ "github.com/hashicorp/hcl/json/scanner"
+ "github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = fmt.Errorf("%s: %s", pos, msg)
+ }
+
+ // The root must be an object in JSON
+ object, err := p.object()
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // We make our final node an object list so it is more HCL compatible
+ f.Node = object.List
+
+ // Flatten it, which finds patterns and turns them into more HCL-like
+ // AST trees.
+ flattenObjects(f.Node)
+
+ return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because we might want to use the already
+ // collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // Check for a followup comma. If it isn't a comma, then we're done
+ if tok := p.scan(); tok.Type != token.COMMA {
+ break
+ }
+ }
+
+ return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
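+ // In JSON a value always follows the key's ':' token; record the assignment
+ // position (converted to an HCL position) and parse the value.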
+ switch p.tok.Type {
+ case token.COLON:
+ pos := p.tok.Pos
+ o.Assign = hcltoken.Pos{
+ Filename: pos.Filename,
+ Offset: pos.Offset,
+ Line: pos.Line,
+ Column: pos.Column,
+ }
+
+ o.Val, err = p.objectValue()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ return nil, errEofToken
+ case token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{
+ Token: p.tok.HCLToken(),
+ })
+ case token.COLON:
+ // If we have a zero keycount it means that we never got
+ // an object key, i.e. `{ :`. This is a syntax error.
+ if keyCount == 0 {
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+
+ // Done
+ return keys, nil
+ case token.ILLEGAL:
+ return nil, errors.New("illegal")
+ default:
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+ }
+}
+
+// objectValue parses any type of value, such as number, bool, string, object
+// or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+ defer un(trace(p, "ParseObjectValue"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root value, which in JSON must be an object.
+func (p *Parser) object() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.LBRACE:
+ return p.objectType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{}
+
+ l, err := p.objectList()
+
+ // if we hit RBRACE, we are good to go (means we parsed all Items), if it's
+ // not an RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ o.List = l
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{}
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.STRING:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.COMMA:
+ continue
+ case token.LBRACE:
+ node, err := p.objectType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.BOOL:
+ // TODO(arslan) should we support? not supported by HCL yet
+ case token.LBRACK:
+ // TODO(arslan) should we support nested lists? Even though it's
+ // written in README of HCL, it's not a part of the grammar
+ // (not defined in parse.y)
+ case token.RBRACK:
+ // finished
+ return l, nil
+ default:
+ return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+ }
+
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok.HCLToken(),
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned, that token is returned instead.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ p.tok = p.sc.Scan()
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 0000000000..fe3f0f0950
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of the most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from an io.Reader-compatible type
+ // (*bytes.Buffer), so in the future we could easily switch to a streaming
+ // read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0) if
+// an error occurs (or io.EOF is reached).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ if ch == utf8.RuneError && size == 1 {
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position: the initial next() moves the offset by one rune (by its
+ // size in bytes, actually), but we are interested in the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ } else if lit == "null" {
+ tok = token.NULL
+ } else {
+ s.err("illegal char")
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case ':':
+ tok = token.COLON
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ s.err("illegal char")
+ }
+ default:
+ s.err("illegal char: " + string(ch))
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ zero := ch == '0'
+ pos := s.srcPos
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ // If the number has more digits and started with a zero, it's an error
+ if zero && pos != s.srcPos {
+ s.err("numbers cannot start with 0")
+ }
+
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the given rune. It returns
+// the next non-decimal rune, which is used to determine whether it's a
+// fraction or an exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if ch == '\n' || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for a while
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans up to n runes with the given base. For example, an octal
+// escape such as \123 would result in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ // we scanned all digits; put the last non-digit char back
+ s.unread()
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr by default
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
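
For orientation, here is a minimal sketch of how this vendored scanner can be driven. It is illustrative only (not part of the vendored files) and relies solely on the `New` and `Scan` APIs added above:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/json/scanner"
        "github.com/hashicorp/hcl/json/token"
    )

    func main() {
        s := scanner.New([]byte(`{"name": "example", "count": 3}`))
        for {
            tok := s.Scan()
            if tok.Type == token.EOF {
                break
            }
            // Each token carries its type, position, and literal text.
            fmt.Printf("%-7s %s %q\n", tok.Type, tok.Pos, tok.Text)
        }
    }
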
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 0000000000..59c1bb72d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 0000000000..95a0c3eee6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+ "fmt"
+ "strconv"
+
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+
+ identifier_beg
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ NULL // null
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+ COLON // :
+
+ RBRACK // ]
+ RBRACE // }
+
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+ NULL: "NULL",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ COLON: "COLON",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token type t.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a string representation of the token, including its
+// position, type, and literal text.
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+ switch t.Type {
+ case BOOL:
+ return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+ case FLOAT:
+ return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+ case NULL:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+ case NUMBER:
+ return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+ case STRING:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+ default:
+ panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+ }
+}
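
A small illustration of the `HCLToken` conversion above (a sketch; the literal value is made up): a JSON STRING token maps to an HCL STRING token with the JSON flag set.

    jsonTok := token.Token{Type: token.STRING, Text: `"hello"`}
    hclTok := jsonTok.HCLToken()
    // hclTok.Type == hcltoken.STRING, hclTok.Text == `"hello"`, hclTok.JSON == true
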
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 0000000000..d9993c2928
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+ lexModeUnknown lexModeValue = iota
+ lexModeHcl
+ lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+ var (
+ r rune
+ w int
+ offset int
+ )
+
+ for {
+ r, w = utf8.DecodeRune(v[offset:])
+ offset += w
+ if unicode.IsSpace(r) {
+ continue
+ }
+ if r == '{' {
+ return lexModeJson
+ }
+ break
+ }
+
+ return lexModeHcl
+}
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 0000000000..1fca53c4ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hclParser "github.com/hashicorp/hcl/hcl/parser"
+ jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns an AST tree.
+//
+// The input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+ return parse(in)
+}
+
+// ParseString accepts a string as input and returns an AST tree.
+func ParseString(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+ switch lexMode(in) {
+ case lexModeHcl:
+ return hclParser.Parse(in)
+ case lexModeJson:
+ return jsonParser.Parse(in)
+ }
+
+ return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
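
As a quick, hedged sketch of the entry points above (not part of the vendored code): `Parse` picks the JSON parser when the first non-space character is `{`, and otherwise falls back to the HCL parser.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl"
    )

    func main() {
        // A leading '{' makes lexMode choose the JSON parser.
        file, err := hcl.Parse(`{"service": {"port": 8080}}`)
        if err != nil {
            panic(err)
        }
        fmt.Printf("parsed: %T\n", file) // *ast.File
    }
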
diff --git a/vendor/github.com/hectane/go-acl/LICENSE.txt b/vendor/github.com/hectane/go-acl/LICENSE.txt
new file mode 100644
index 0000000000..fb72c83a66
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/LICENSE.txt
@@ -0,0 +1,9 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Nathan Osman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/hectane/go-acl/README.md b/vendor/github.com/hectane/go-acl/README.md
new file mode 100644
index 0000000000..58adf9762c
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/README.md
@@ -0,0 +1,66 @@
+## go-acl
+
+[![Build status](https://ci.appveyor.com/api/projects/status/rbdyu7c39o2j0ru9?svg=true)](https://ci.appveyor.com/project/nathan-osman/go-acl)
+[![GoDoc](https://godoc.org/github.com/hectane/go-acl?status.svg)](https://godoc.org/github.com/hectane/go-acl)
+[![MIT License](http://img.shields.io/badge/license-MIT-9370d8.svg?style=flat)](http://opensource.org/licenses/MIT)
+
+Manipulating ACLs (Access Control Lists) on Windows is difficult. go-acl wraps the Windows API functions that control access to objects, simplifying the process.
+
+### Using the Package
+
+To use the package, add the following imports:
+
+ import (
+ "github.com/hectane/go-acl"
+ "golang.org/x/sys/windows"
+ )
+
+### Examples
+
+Probably the most commonly used function in this package is `Chmod`:
+
+ if err := acl.Chmod("C:\\path\\to\\file.txt", 0755); err != nil {
+ panic(err)
+ }
+
+To grant read access to user "Alice" and deny write access to user "Bob":
+
+ if err := acl.Apply(
+ "C:\\path\\to\\file.txt",
+ false,
+ false,
+ acl.GrantName(windows.GENERIC_READ, "Alice"),
+ acl.DenyName(windows.GENERIC_WRITE, "Bob"),
+ ); err != nil {
+ panic(err)
+ }
+
+### Using the API Directly
+
+go-acl's `api` package exposes the individual Windows API functions that are used to manipulate ACLs. For example, to retrieve the current owner of a file:
+
+ import (
+ "github.com/hectane/go-acl/api"
+ "golang.org/x/sys/windows"
+ )
+
+ var (
+ owner *windows.SID
+ secDesc windows.Handle
+ )
+ err := api.GetNamedSecurityInfo(
+ "C:\\path\\to\\file.txt",
+ api.SE_FILE_OBJECT,
+ api.OWNER_SECURITY_INFORMATION,
+ &owner,
+ nil,
+ nil,
+ nil,
+ &secDesc,
+ )
+ if err != nil {
+ panic(err)
+ }
+ defer windows.LocalFree(secDesc)
+
+`owner` will then point to the SID for the owner of the file.
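
The SID-based helpers (`GrantSid`/`DenySid` in util.go below) can also be combined with well-known SIDs; a minimal sketch, assuming the well-known Everyone SID "S-1-1-0" (the same one chmod.go uses):

    everyone, err := windows.StringToSid("S-1-1-0")
    if err != nil {
        panic(err)
    }
    if err := acl.Apply(
        "C:\\path\\to\\file.txt",
        false, // keep any existing entries
        true,  // inherit ACEs from the parent
        acl.GrantSid(windows.GENERIC_READ, everyone),
    ); err != nil {
        panic(err)
    }
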
diff --git a/vendor/github.com/hectane/go-acl/api/acl.go b/vendor/github.com/hectane/go-acl/api/acl.go
new file mode 100644
index 0000000000..756f56ad5c
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/api/acl.go
@@ -0,0 +1,98 @@
+//+build windows
+
+package api
+
+import (
+ "golang.org/x/sys/windows"
+
+ "unsafe"
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379284.aspx
+const (
+ NO_MULTIPLE_TRUSTEE = iota
+ TRUSTEE_IS_IMPERSONATE
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379638.aspx
+const (
+ TRUSTEE_IS_SID = iota
+ TRUSTEE_IS_NAME
+ TRUSTEE_BAD_FORM
+ TRUSTEE_IS_OBJECTS_AND_SID
+ TRUSTEE_IS_OBJECTS_AND_NAME
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379639.aspx
+const (
+ TRUSTEE_IS_UNKNOWN = iota
+ TRUSTEE_IS_USER
+ TRUSTEE_IS_GROUP
+ TRUSTEE_IS_DOMAIN
+ TRUSTEE_IS_ALIAS
+ TRUSTEE_IS_WELL_KNOWN_GROUP
+ TRUSTEE_IS_DELETED
+ TRUSTEE_IS_INVALID
+ TRUSTEE_IS_COMPUTER
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa374899.aspx
+const (
+ NOT_USED_ACCESS = iota
+ GRANT_ACCESS
+ SET_ACCESS
+ DENY_ACCESS
+ REVOKE_ACCESS
+ SET_AUDIT_SUCCESS
+ SET_AUDIT_FAILURE
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa446627.aspx
+const (
+ NO_INHERITANCE = 0x0
+ SUB_OBJECTS_ONLY_INHERIT = 0x1
+ SUB_CONTAINERS_ONLY_INHERIT = 0x2
+ SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3
+ INHERIT_NO_PROPAGATE = 0x4
+ INHERIT_ONLY = 0x8
+
+ OBJECT_INHERIT_ACE = 0x1
+ CONTAINER_INHERIT_ACE = 0x2
+ NO_PROPAGATE_INHERIT_ACE = 0x4
+ INHERIT_ONLY_ACE = 0x8
+)
+
+var (
+ procSetEntriesInAclW = advapi32.MustFindProc("SetEntriesInAclW")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379636.aspx
+type Trustee struct {
+ MultipleTrustee *Trustee
+ MultipleTrusteeOperation int32
+ TrusteeForm int32
+ TrusteeType int32
+ Name *uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa446627.aspx
+type ExplicitAccess struct {
+ AccessPermissions uint32
+ AccessMode int32
+ Inheritance uint32
+ Trustee Trustee
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379576.aspx
+func SetEntriesInAcl(entries []ExplicitAccess, oldAcl windows.Handle, newAcl *windows.Handle) error {
+ ret, _, err := procSetEntriesInAclW.Call(
+ uintptr(len(entries)),
+ uintptr(unsafe.Pointer(&entries[0])),
+ uintptr(oldAcl),
+ uintptr(unsafe.Pointer(newAcl)),
+ )
+ if ret != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/hectane/go-acl/api/api.go b/vendor/github.com/hectane/go-acl/api/api.go
new file mode 100644
index 0000000000..371dd2d8cd
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/api/api.go
@@ -0,0 +1,10 @@
+//+build windows
+
+// Package api provides Windows API functions for manipulating ACLs.
+package api
+
+import (
+ "golang.org/x/sys/windows"
+)
+
+var advapi32 = windows.MustLoadDLL("advapi32.dll")
diff --git a/vendor/github.com/hectane/go-acl/api/posix.go b/vendor/github.com/hectane/go-acl/api/posix.go
new file mode 100644
index 0000000000..2c199fd8ef
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/api/posix.go
@@ -0,0 +1,3 @@
+//+build !windows
+
+package api
diff --git a/vendor/github.com/hectane/go-acl/api/secinfo.go b/vendor/github.com/hectane/go-acl/api/secinfo.go
new file mode 100644
index 0000000000..6b3c44105e
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/api/secinfo.go
@@ -0,0 +1,84 @@
+//+build windows
+
+package api
+
+import (
+ "golang.org/x/sys/windows"
+
+ "unsafe"
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379593.aspx
+const (
+ SE_UNKNOWN_OBJECT_TYPE = iota
+ SE_FILE_OBJECT
+ SE_SERVICE
+ SE_PRINTER
+ SE_REGISTRY_KEY
+ SE_LMSHARE
+ SE_KERNEL_OBJECT
+ SE_WINDOW_OBJECT
+ SE_DS_OBJECT
+ SE_DS_OBJECT_ALL
+ SE_PROVIDER_DEFINED_OBJECT
+ SE_WMIGUID_OBJECT
+ SE_REGISTRY_WOW64_32KEY
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379573.aspx
+const (
+ OWNER_SECURITY_INFORMATION = 0x00001
+ GROUP_SECURITY_INFORMATION = 0x00002
+ DACL_SECURITY_INFORMATION = 0x00004
+ SACL_SECURITY_INFORMATION = 0x00008
+ LABEL_SECURITY_INFORMATION = 0x00010
+ ATTRIBUTE_SECURITY_INFORMATION = 0x00020
+ SCOPE_SECURITY_INFORMATION = 0x00040
+ PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00080
+ BACKUP_SECURITY_INFORMATION = 0x10000
+
+ PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000
+ PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000
+ UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000
+ UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000
+)
+
+var (
+ procGetNamedSecurityInfoW = advapi32.MustFindProc("GetNamedSecurityInfoW")
+ procSetNamedSecurityInfoW = advapi32.MustFindProc("SetNamedSecurityInfoW")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa446645.aspx
+func GetNamedSecurityInfo(objectName string, objectType int32, secInfo uint32, owner, group **windows.SID, dacl, sacl, secDesc *windows.Handle) error {
+ ret, _, err := procGetNamedSecurityInfoW.Call(
+ uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(objectName))),
+ uintptr(objectType),
+ uintptr(secInfo),
+ uintptr(unsafe.Pointer(owner)),
+ uintptr(unsafe.Pointer(group)),
+ uintptr(unsafe.Pointer(dacl)),
+ uintptr(unsafe.Pointer(sacl)),
+ uintptr(unsafe.Pointer(secDesc)),
+ )
+ if ret != 0 {
+ return err
+ }
+ return nil
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379579.aspx
+func SetNamedSecurityInfo(objectName string, objectType int32, secInfo uint32, owner, group *windows.SID, dacl, sacl windows.Handle) error {
+ ret, _, err := procSetNamedSecurityInfoW.Call(
+ uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(objectName))),
+ uintptr(objectType),
+ uintptr(secInfo),
+ uintptr(unsafe.Pointer(owner)),
+ uintptr(unsafe.Pointer(group)),
+ uintptr(dacl),
+ uintptr(sacl),
+ )
+ if ret != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/hectane/go-acl/api/sid.go b/vendor/github.com/hectane/go-acl/api/sid.go
new file mode 100644
index 0000000000..4ecc0869c4
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/api/sid.go
@@ -0,0 +1,131 @@
+//+build windows
+
+package api
+
+import (
+ "golang.org/x/sys/windows"
+
+ "unsafe"
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ee207397.aspx
+const (
+ SECURITY_MAX_SID_SIZE = 68
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379650.aspx
+const (
+ WinNullSid = 0
+ WinWorldSid = 1
+ WinLocalSid = 2
+ WinCreatorOwnerSid = 3
+ WinCreatorGroupSid = 4
+ WinCreatorOwnerServerSid = 5
+ WinCreatorGroupServerSid = 6
+ WinNtAuthoritySid = 7
+ WinDialupSid = 8
+ WinNetworkSid = 9
+ WinBatchSid = 10
+ WinInteractiveSid = 11
+ WinServiceSid = 12
+ WinAnonymousSid = 13
+ WinProxySid = 14
+ WinEnterpriseControllersSid = 15
+ WinSelfSid = 16
+ WinAuthenticatedUserSid = 17
+ WinRestrictedCodeSid = 18
+ WinTerminalServerSid = 19
+ WinRemoteLogonIdSid = 20
+ WinLogonIdsSid = 21
+ WinLocalSystemSid = 22
+ WinLocalServiceSid = 23
+ WinNetworkServiceSid = 24
+ WinBuiltinDomainSid = 25
+ WinBuiltinAdministratorsSid = 26
+ WinBuiltinUsersSid = 27
+ WinBuiltinGuestsSid = 28
+ WinBuiltinPowerUsersSid = 29
+ WinBuiltinAccountOperatorsSid = 30
+ WinBuiltinSystemOperatorsSid = 31
+ WinBuiltinPrintOperatorsSid = 32
+ WinBuiltinBackupOperatorsSid = 33
+ WinBuiltinReplicatorSid = 34
+ WinBuiltinPreWindows2000CompatibleAccessSid = 35
+ WinBuiltinRemoteDesktopUsersSid = 36
+ WinBuiltinNetworkConfigurationOperatorsSid = 37
+ WinAccountAdministratorSid = 38
+ WinAccountGuestSid = 39
+ WinAccountKrbtgtSid = 40
+ WinAccountDomainAdminsSid = 41
+ WinAccountDomainUsersSid = 42
+ WinAccountDomainGuestsSid = 43
+ WinAccountComputersSid = 44
+ WinAccountControllersSid = 45
+ WinAccountCertAdminsSid = 46
+ WinAccountSchemaAdminsSid = 47
+ WinAccountEnterpriseAdminsSid = 48
+ WinAccountPolicyAdminsSid = 49
+ WinAccountRasAndIasServersSid = 50
+ WinNTLMAuthenticationSid = 51
+ WinDigestAuthenticationSid = 52
+ WinSChannelAuthenticationSid = 53
+ WinThisOrganizationSid = 54
+ WinOtherOrganizationSid = 55
+ WinBuiltinIncomingForestTrustBuildersSid = 56
+ WinBuiltinPerfMonitoringUsersSid = 57
+ WinBuiltinPerfLoggingUsersSid = 58
+ WinBuiltinAuthorizationAccessSid = 59
+ WinBuiltinTerminalServerLicenseServersSid = 60
+ WinBuiltinDCOMUsersSid = 61
+ WinBuiltinIUsersSid = 62
+ WinIUserSid = 63
+ WinBuiltinCryptoOperatorsSid = 64
+ WinUntrustedLabelSid = 65
+ WinLowLabelSid = 66
+ WinMediumLabelSid = 67
+ WinHighLabelSid = 68
+ WinSystemLabelSid = 69
+ WinWriteRestrictedCodeSid = 70
+ WinCreatorOwnerRightsSid = 71
+ WinCacheablePrincipalsGroupSid = 72
+ WinNonCacheablePrincipalsGroupSid = 73
+ WinEnterpriseReadonlyControllersSid = 74
+ WinAccountReadonlyControllersSid = 75
+ WinBuiltinEventLogReadersGroup = 76
+ WinNewEnterpriseReadonlyControllersSid = 77
+ WinBuiltinCertSvcDComAccessGroup = 78
+ WinMediumPlusLabelSid = 79
+ WinLocalLogonSid = 80
+ WinConsoleLogonSid = 81
+ WinThisOrganizationCertificateSid = 82
+ WinApplicationPackageAuthoritySid = 83
+ WinBuiltinAnyPackageSid = 84
+ WinCapabilityInternetClientSid = 85
+ WinCapabilityInternetClientServerSid = 86
+ WinCapabilityPrivateNetworkClientServerSid = 87
+ WinCapabilityPicturesLibrarySid = 88
+ WinCapabilityVideosLibrarySid = 89
+ WinCapabilityMusicLibrarySid = 90
+ WinCapabilityDocumentsLibrarySid = 91
+ WinCapabilitySharedUserCertificatesSid = 92
+ WinCapabilityEnterpriseAuthenticationSid = 93
+ WinCapabilityRemovableStorageSid = 94
+)
+
+var (
+ procCreateWellKnownSid = advapi32.MustFindProc("CreateWellKnownSid")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa446585.aspx
+func CreateWellKnownSid(sidType int32, sidDomain, sid *windows.SID, sidLen *uint32) error {
+ ret, _, err := procCreateWellKnownSid.Call(
+ uintptr(sidType),
+ uintptr(unsafe.Pointer(sidDomain)),
+ uintptr(unsafe.Pointer(sid)),
+ uintptr(unsafe.Pointer(sidLen)),
+ )
+ if ret == 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/hectane/go-acl/apply.go b/vendor/github.com/hectane/go-acl/apply.go
new file mode 100644
index 0000000000..6f4b55f61a
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/apply.go
@@ -0,0 +1,55 @@
+//+build windows
+
+package acl
+
+import (
+ "github.com/hectane/go-acl/api"
+ "golang.org/x/sys/windows"
+
+ "unsafe"
+)
+
+// Apply the provided access control entries to a file. If the replace
+// parameter is true, existing entries will be overwritten. If the inherit
+// parameter is true, the file will inherit ACEs from its parent.
+func Apply(name string, replace, inherit bool, entries ...api.ExplicitAccess) error {
+ var oldAcl windows.Handle
+ if !replace {
+ var secDesc windows.Handle
+ api.GetNamedSecurityInfo(
+ name,
+ api.SE_FILE_OBJECT,
+ api.DACL_SECURITY_INFORMATION,
+ nil,
+ nil,
+ &oldAcl,
+ nil,
+ &secDesc,
+ )
+ defer windows.LocalFree(secDesc)
+ }
+ var acl windows.Handle
+ if err := api.SetEntriesInAcl(
+ entries,
+ oldAcl,
+ &acl,
+ ); err != nil {
+ return err
+ }
+ defer windows.LocalFree((windows.Handle)(unsafe.Pointer(acl)))
+ var secInfo uint32
+ if !inherit {
+ secInfo = api.PROTECTED_DACL_SECURITY_INFORMATION
+ } else {
+ secInfo = api.UNPROTECTED_DACL_SECURITY_INFORMATION
+ }
+ return api.SetNamedSecurityInfo(
+ name,
+ api.SE_FILE_OBJECT,
+ api.DACL_SECURITY_INFORMATION|secInfo,
+ nil,
+ nil,
+ acl,
+ 0,
+ )
+}
diff --git a/vendor/github.com/hectane/go-acl/appveyor.yml b/vendor/github.com/hectane/go-acl/appveyor.yml
new file mode 100644
index 0000000000..068e79124f
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/appveyor.yml
@@ -0,0 +1,16 @@
+version: '{build}'
+
+clone_folder: C:\gopath\src\github.com\hectane\go-acl
+
+environment:
+ GOPATH: C:\gopath
+
+install:
+ - go version
+ - go env
+ - go get -t -v ./...
+
+build: off
+
+test_script:
+ - go test -v ./...
diff --git a/vendor/github.com/hectane/go-acl/chmod.go b/vendor/github.com/hectane/go-acl/chmod.go
new file mode 100644
index 0000000000..a0c9ad4271
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/chmod.go
@@ -0,0 +1,38 @@
+//+build windows
+
+package acl
+
+import (
+ "os"
+
+ "golang.org/x/sys/windows"
+)
+
+// Chmod changes the permissions of the specified file. Only the nine
+// least-significant bits of the mode are used, allowing access by the file's
+// owner, the file's group, and everyone else to be explicitly controlled.
+func Chmod(name string, fileMode os.FileMode) error {
+ // https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems
+ creatorOwnerSID, err := windows.StringToSid("S-1-3-0")
+ if err != nil {
+ return err
+ }
+ creatorGroupSID, err := windows.StringToSid("S-1-3-1")
+ if err != nil {
+ return err
+ }
+ everyoneSID, err := windows.StringToSid("S-1-1-0")
+ if err != nil {
+ return err
+ }
+
+ mode := uint32(fileMode)
+ return Apply(
+ name,
+ true,
+ false,
+ GrantSid(((mode&0700)<<23)|((mode&0200)<<9), creatorOwnerSID),
+ GrantSid(((mode&0070)<<26)|((mode&0020)<<12), creatorGroupSID),
+ GrantSid(((mode&0007)<<29)|((mode&0002)<<15), everyoneSID),
+ )
+}
diff --git a/vendor/github.com/hectane/go-acl/posix.go b/vendor/github.com/hectane/go-acl/posix.go
new file mode 100644
index 0000000000..c45a3600cd
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/posix.go
@@ -0,0 +1,8 @@
+//+build !windows
+
+package acl
+
+import "os"
+
+// Chmod is os.Chmod.
+var Chmod = os.Chmod
diff --git a/vendor/github.com/hectane/go-acl/util.go b/vendor/github.com/hectane/go-acl/util.go
new file mode 100644
index 0000000000..fc02c5e726
--- /dev/null
+++ b/vendor/github.com/hectane/go-acl/util.go
@@ -0,0 +1,62 @@
+//+build windows
+
+package acl
+
+import (
+ "github.com/hectane/go-acl/api"
+ "golang.org/x/sys/windows"
+
+ "unsafe"
+)
+
+// Create an ExplicitAccess instance granting permissions to the provided SID.
+func GrantSid(accessPermissions uint32, sid *windows.SID) api.ExplicitAccess {
+ return api.ExplicitAccess{
+ AccessPermissions: accessPermissions,
+ AccessMode: api.GRANT_ACCESS,
+ Inheritance: api.SUB_CONTAINERS_AND_OBJECTS_INHERIT,
+ Trustee: api.Trustee{
+ TrusteeForm: api.TRUSTEE_IS_SID,
+ Name: (*uint16)(unsafe.Pointer(sid)),
+ },
+ }
+}
+
+// Create an ExplicitAccess instance granting permissions to the provided name.
+func GrantName(accessPermissions uint32, name string) api.ExplicitAccess {
+ return api.ExplicitAccess{
+ AccessPermissions: accessPermissions,
+ AccessMode: api.GRANT_ACCESS,
+ Inheritance: api.SUB_CONTAINERS_AND_OBJECTS_INHERIT,
+ Trustee: api.Trustee{
+ TrusteeForm: api.TRUSTEE_IS_NAME,
+ Name: windows.StringToUTF16Ptr(name),
+ },
+ }
+}
+
+// Create an ExplicitAccess instance denying permissions to the provided SID.
+func DenySid(accessPermissions uint32, sid *windows.SID) api.ExplicitAccess {
+ return api.ExplicitAccess{
+ AccessPermissions: accessPermissions,
+ AccessMode: api.DENY_ACCESS,
+ Inheritance: api.SUB_CONTAINERS_AND_OBJECTS_INHERIT,
+ Trustee: api.Trustee{
+ TrusteeForm: api.TRUSTEE_IS_SID,
+ Name: (*uint16)(unsafe.Pointer(sid)),
+ },
+ }
+}
+
+// Create an ExplicitAccess instance denying permissions to the provided name.
+func DenyName(accessPermissions uint32, name string) api.ExplicitAccess {
+ return api.ExplicitAccess{
+ AccessPermissions: accessPermissions,
+ AccessMode: api.DENY_ACCESS,
+ Inheritance: api.SUB_CONTAINERS_AND_OBJECTS_INHERIT,
+ Trustee: api.Trustee{
+ TrusteeForm: api.TRUSTEE_IS_NAME,
+ Name: windows.StringToUTF16Ptr(name),
+ },
+ }
+}
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/machine.go b/vendor/github.com/influxdata/go-syslog/v3/rfc3164/machine.go
deleted file mode 100644
index 6dd3d29730..0000000000
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/machine.go
+++ /dev/null
@@ -1,7263 +0,0 @@
-package rfc3164
-
-import (
- "fmt"
- "time"
-
- "github.com/influxdata/go-syslog/v3"
- "github.com/influxdata/go-syslog/v3/common"
-)
-
-var (
- errPrival = "expecting a priority value in the range 1-191 or equal to 0 [col %d]"
- errPri = "expecting a priority value within angle brackets [col %d]"
- errTimestamp = "expecting a Stamp timestamp [col %d]"
- errRFC3339 = "expecting a Stamp or a RFC3339 timestamp [col %d]"
- errHostname = "expecting an hostname (from 1 to max 255 US-ASCII characters) [col %d]"
- errTag = "expecting an alphanumeric tag (max 32 characters) [col %d]"
- errContentStart = "expecting a content part starting with a non-alphanumeric character [col %d]"
- errContent = "expecting a content part composed by visible characters only [col %d]"
- errParse = "parsing error [col %d]"
-)
-
-const start int = 1
-const firstFinal int = 333
-
-const enFail int = 373
-const enMain int = 1
-
-type machine struct {
- data []byte
- cs int
- p, pe, eof int
- pb int
- err error
- bestEffort bool
- yyyy int
- rfc3339 bool
- loc *time.Location
- timezone *time.Location
-}
-
-// NewMachine creates a new FSM able to parse RFC3164 syslog messages.
-func NewMachine(options ...syslog.MachineOption) syslog.Machine {
- m := &machine{}
-
- for _, opt := range options {
- opt(m)
- }
-
- return m
-}
-
-// WithBestEffort enables best effort mode.
-func (m *machine) WithBestEffort() {
- m.bestEffort = true
-}
-
-// HasBestEffort tells whether the receiving machine has best effort mode on or off.
-func (m *machine) HasBestEffort() bool {
- return m.bestEffort
-}
-
-// WithYear sets the year for the Stamp timestamp of the RFC 3164 syslog message.
-func (m *machine) WithYear(o YearOperator) {
- m.yyyy = YearOperation{o}.Operate()
-}
-
-// WithTimezone sets the time zone for the Stamp timestamp of the RFC 3164 syslog message.
-func (m *machine) WithTimezone(loc *time.Location) {
- m.loc = loc
-}
-
-// WithLocaleTimezone sets the locale time zone for the Stamp timestamp of the RFC 3164 syslog message.
-func (m *machine) WithLocaleTimezone(loc *time.Location) {
- m.timezone = loc
-}
-
-// WithRFC3339 enables ability to ALSO match RFC3339 timestamps.
-//
-// Notice this does not disable the default and correct timestamps - ie., Stamp timestamps.
-func (m *machine) WithRFC3339() {
- m.rfc3339 = true
-}
-
-// Err returns the error that occurred on the last call to Parse.
-//
-// If the result is nil, then the line was parsed successfully.
-func (m *machine) Err() error {
- return m.err
-}
-
-func (m *machine) text() []byte {
- return m.data[m.pb:m.p]
-}
-
-// Parse parses the input byte array as a RFC3164 syslog message.
-func (m *machine) Parse(input []byte) (syslog.Message, error) {
- m.data = input
- m.p = 0
- m.pb = 0
- m.pe = len(input)
- m.eof = len(input)
- m.err = nil
- output := &syslogMessage{}
-
- {
- m.cs = start
- }
-
- {
- var _widec int16
- if (m.p) == (m.pe) {
- goto _testEof
- }
- switch m.cs {
- case 1:
- goto stCase1
- case 0:
- goto stCase0
- case 2:
- goto stCase2
- case 3:
- goto stCase3
- case 4:
- goto stCase4
- case 5:
- goto stCase5
- case 6:
- goto stCase6
- case 7:
- goto stCase7
- case 8:
- goto stCase8
- case 9:
- goto stCase9
- case 10:
- goto stCase10
- case 11:
- goto stCase11
- case 12:
- goto stCase12
- case 13:
- goto stCase13
- case 14:
- goto stCase14
- case 15:
- goto stCase15
- case 16:
- goto stCase16
- case 17:
- goto stCase17
- case 18:
- goto stCase18
- case 19:
- goto stCase19
- case 20:
- goto stCase20
- case 21:
- goto stCase21
- case 22:
- goto stCase22
- case 333:
- goto stCase333
- case 334:
- goto stCase334
- case 335:
- goto stCase335
- case 336:
- goto stCase336
- case 337:
- goto stCase337
- case 338:
- goto stCase338
- case 339:
- goto stCase339
- case 340:
- goto stCase340
- case 341:
- goto stCase341
- case 342:
- goto stCase342
- case 343:
- goto stCase343
- case 344:
- goto stCase344
- case 345:
- goto stCase345
- case 346:
- goto stCase346
- case 347:
- goto stCase347
- case 348:
- goto stCase348
- case 349:
- goto stCase349
- case 350:
- goto stCase350
- case 351:
- goto stCase351
- case 352:
- goto stCase352
- case 353:
- goto stCase353
- case 354:
- goto stCase354
- case 355:
- goto stCase355
- case 356:
- goto stCase356
- case 357:
- goto stCase357
- case 358:
- goto stCase358
- case 359:
- goto stCase359
- case 360:
- goto stCase360
- case 361:
- goto stCase361
- case 362:
- goto stCase362
- case 363:
- goto stCase363
- case 364:
- goto stCase364
- case 365:
- goto stCase365
- case 366:
- goto stCase366
- case 367:
- goto stCase367
- case 368:
- goto stCase368
- case 23:
- goto stCase23
- case 24:
- goto stCase24
- case 25:
- goto stCase25
- case 26:
- goto stCase26
- case 369:
- goto stCase369
- case 370:
- goto stCase370
- case 371:
- goto stCase371
- case 372:
- goto stCase372
- case 27:
- goto stCase27
- case 28:
- goto stCase28
- case 29:
- goto stCase29
- case 30:
- goto stCase30
- case 31:
- goto stCase31
- case 32:
- goto stCase32
- case 33:
- goto stCase33
- case 34:
- goto stCase34
- case 35:
- goto stCase35
- case 36:
- goto stCase36
- case 37:
- goto stCase37
- case 38:
- goto stCase38
- case 39:
- goto stCase39
- case 40:
- goto stCase40
- case 41:
- goto stCase41
- case 42:
- goto stCase42
- case 43:
- goto stCase43
- case 44:
- goto stCase44
- case 45:
- goto stCase45
- case 46:
- goto stCase46
- case 47:
- goto stCase47
- case 48:
- goto stCase48
- case 49:
- goto stCase49
- case 50:
- goto stCase50
- case 51:
- goto stCase51
- case 52:
- goto stCase52
- case 53:
- goto stCase53
- case 54:
- goto stCase54
- case 55:
- goto stCase55
- case 56:
- goto stCase56
- case 57:
- goto stCase57
- case 58:
- goto stCase58
- case 59:
- goto stCase59
- case 60:
- goto stCase60
- case 61:
- goto stCase61
- case 62:
- goto stCase62
- case 63:
- goto stCase63
- case 64:
- goto stCase64
- case 65:
- goto stCase65
- case 66:
- goto stCase66
- case 67:
- goto stCase67
- case 68:
- goto stCase68
- case 69:
- goto stCase69
- case 70:
- goto stCase70
- case 71:
- goto stCase71
- case 72:
- goto stCase72
- case 73:
- goto stCase73
- case 74:
- goto stCase74
- case 75:
- goto stCase75
- case 76:
- goto stCase76
- case 77:
- goto stCase77
- case 78:
- goto stCase78
- case 79:
- goto stCase79
- case 80:
- goto stCase80
- case 81:
- goto stCase81
- case 82:
- goto stCase82
- case 83:
- goto stCase83
- case 84:
- goto stCase84
- case 85:
- goto stCase85
- case 86:
- goto stCase86
- case 87:
- goto stCase87
- case 88:
- goto stCase88
- case 89:
- goto stCase89
- case 90:
- goto stCase90
- case 91:
- goto stCase91
- case 92:
- goto stCase92
- case 93:
- goto stCase93
- case 94:
- goto stCase94
- case 95:
- goto stCase95
- case 96:
- goto stCase96
- case 97:
- goto stCase97
- case 98:
- goto stCase98
- case 99:
- goto stCase99
- case 100:
- goto stCase100
- case 101:
- goto stCase101
- case 102:
- goto stCase102
- case 103:
- goto stCase103
- case 104:
- goto stCase104
- case 105:
- goto stCase105
- case 106:
- goto stCase106
- case 107:
- goto stCase107
- case 108:
- goto stCase108
- case 109:
- goto stCase109
- case 110:
- goto stCase110
- case 111:
- goto stCase111
- case 112:
- goto stCase112
- case 113:
- goto stCase113
- case 114:
- goto stCase114
- case 115:
- goto stCase115
- case 116:
- goto stCase116
- case 117:
- goto stCase117
- case 118:
- goto stCase118
- case 119:
- goto stCase119
- case 120:
- goto stCase120
- case 121:
- goto stCase121
- case 122:
- goto stCase122
- case 123:
- goto stCase123
- case 124:
- goto stCase124
- case 125:
- goto stCase125
- case 126:
- goto stCase126
- case 127:
- goto stCase127
- case 128:
- goto stCase128
- case 129:
- goto stCase129
- case 130:
- goto stCase130
- case 131:
- goto stCase131
- case 132:
- goto stCase132
- case 133:
- goto stCase133
- case 134:
- goto stCase134
- case 135:
- goto stCase135
- case 136:
- goto stCase136
- case 137:
- goto stCase137
- case 138:
- goto stCase138
- case 139:
- goto stCase139
- case 140:
- goto stCase140
- case 141:
- goto stCase141
- case 142:
- goto stCase142
- case 143:
- goto stCase143
- case 144:
- goto stCase144
- case 145:
- goto stCase145
- case 146:
- goto stCase146
- case 147:
- goto stCase147
- case 148:
- goto stCase148
- case 149:
- goto stCase149
- case 150:
- goto stCase150
- case 151:
- goto stCase151
- case 152:
- goto stCase152
- case 153:
- goto stCase153
- case 154:
- goto stCase154
- case 155:
- goto stCase155
- case 156:
- goto stCase156
- case 157:
- goto stCase157
- case 158:
- goto stCase158
- case 159:
- goto stCase159
- case 160:
- goto stCase160
- case 161:
- goto stCase161
- case 162:
- goto stCase162
- case 163:
- goto stCase163
- case 164:
- goto stCase164
- case 165:
- goto stCase165
- case 166:
- goto stCase166
- case 167:
- goto stCase167
- case 168:
- goto stCase168
- case 169:
- goto stCase169
- case 170:
- goto stCase170
- case 171:
- goto stCase171
- case 172:
- goto stCase172
- case 173:
- goto stCase173
- case 174:
- goto stCase174
- case 175:
- goto stCase175
- case 176:
- goto stCase176
- case 177:
- goto stCase177
- case 178:
- goto stCase178
- case 179:
- goto stCase179
- case 180:
- goto stCase180
- case 181:
- goto stCase181
- case 182:
- goto stCase182
- case 183:
- goto stCase183
- case 184:
- goto stCase184
- case 185:
- goto stCase185
- case 186:
- goto stCase186
- case 187:
- goto stCase187
- case 188:
- goto stCase188
- case 189:
- goto stCase189
- case 190:
- goto stCase190
- case 191:
- goto stCase191
- case 192:
- goto stCase192
- case 193:
- goto stCase193
- case 194:
- goto stCase194
- case 195:
- goto stCase195
- case 196:
- goto stCase196
- case 197:
- goto stCase197
- case 198:
- goto stCase198
- case 199:
- goto stCase199
- case 200:
- goto stCase200
- case 201:
- goto stCase201
- case 202:
- goto stCase202
- case 203:
- goto stCase203
- case 204:
- goto stCase204
- case 205:
- goto stCase205
- case 206:
- goto stCase206
- case 207:
- goto stCase207
- case 208:
- goto stCase208
- case 209:
- goto stCase209
- case 210:
- goto stCase210
- case 211:
- goto stCase211
- case 212:
- goto stCase212
- case 213:
- goto stCase213
- case 214:
- goto stCase214
- case 215:
- goto stCase215
- case 216:
- goto stCase216
- case 217:
- goto stCase217
- case 218:
- goto stCase218
- case 219:
- goto stCase219
- case 220:
- goto stCase220
- case 221:
- goto stCase221
- case 222:
- goto stCase222
- case 223:
- goto stCase223
- case 224:
- goto stCase224
- case 225:
- goto stCase225
- case 226:
- goto stCase226
- case 227:
- goto stCase227
- case 228:
- goto stCase228
- case 229:
- goto stCase229
- case 230:
- goto stCase230
- case 231:
- goto stCase231
- case 232:
- goto stCase232
- case 233:
- goto stCase233
- case 234:
- goto stCase234
- case 235:
- goto stCase235
- case 236:
- goto stCase236
- case 237:
- goto stCase237
- case 238:
- goto stCase238
- case 239:
- goto stCase239
- case 240:
- goto stCase240
- case 241:
- goto stCase241
- case 242:
- goto stCase242
- case 243:
- goto stCase243
- case 244:
- goto stCase244
- case 245:
- goto stCase245
- case 246:
- goto stCase246
- case 247:
- goto stCase247
- case 248:
- goto stCase248
- case 249:
- goto stCase249
- case 250:
- goto stCase250
- case 251:
- goto stCase251
- case 252:
- goto stCase252
- case 253:
- goto stCase253
- case 254:
- goto stCase254
- case 255:
- goto stCase255
- case 256:
- goto stCase256
- case 257:
- goto stCase257
- case 258:
- goto stCase258
- case 259:
- goto stCase259
- case 260:
- goto stCase260
- case 261:
- goto stCase261
- case 262:
- goto stCase262
- case 263:
- goto stCase263
- case 264:
- goto stCase264
- case 265:
- goto stCase265
- case 266:
- goto stCase266
- case 267:
- goto stCase267
- case 268:
- goto stCase268
- case 269:
- goto stCase269
- case 270:
- goto stCase270
- case 271:
- goto stCase271
- case 272:
- goto stCase272
- case 273:
- goto stCase273
- case 274:
- goto stCase274
- case 275:
- goto stCase275
- case 276:
- goto stCase276
- case 277:
- goto stCase277
- case 278:
- goto stCase278
- case 279:
- goto stCase279
- case 280:
- goto stCase280
- case 281:
- goto stCase281
- case 282:
- goto stCase282
- case 283:
- goto stCase283
- case 284:
- goto stCase284
- case 285:
- goto stCase285
- case 286:
- goto stCase286
- case 287:
- goto stCase287
- case 288:
- goto stCase288
- case 289:
- goto stCase289
- case 290:
- goto stCase290
- case 291:
- goto stCase291
- case 292:
- goto stCase292
- case 293:
- goto stCase293
- case 294:
- goto stCase294
- case 295:
- goto stCase295
- case 296:
- goto stCase296
- case 297:
- goto stCase297
- case 298:
- goto stCase298
- case 299:
- goto stCase299
- case 300:
- goto stCase300
- case 301:
- goto stCase301
- case 302:
- goto stCase302
- case 303:
- goto stCase303
- case 304:
- goto stCase304
- case 305:
- goto stCase305
- case 306:
- goto stCase306
- case 307:
- goto stCase307
- case 308:
- goto stCase308
- case 309:
- goto stCase309
- case 310:
- goto stCase310
- case 311:
- goto stCase311
- case 312:
- goto stCase312
- case 313:
- goto stCase313
- case 314:
- goto stCase314
- case 315:
- goto stCase315
- case 316:
- goto stCase316
- case 317:
- goto stCase317
- case 318:
- goto stCase318
- case 319:
- goto stCase319
- case 320:
- goto stCase320
- case 321:
- goto stCase321
- case 322:
- goto stCase322
- case 323:
- goto stCase323
- case 324:
- goto stCase324
- case 325:
- goto stCase325
- case 326:
- goto stCase326
- case 327:
- goto stCase327
- case 328:
- goto stCase328
- case 329:
- goto stCase329
- case 330:
- goto stCase330
- case 331:
- goto stCase331
- case 332:
- goto stCase332
- case 373:
- goto stCase373
- }
- goto stOut
- stCase1:
- if (m.data)[(m.p)] == 60 {
- goto st2
- }
- goto tr0
- tr0:
-
- m.err = fmt.Errorf(errPri, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- goto st0
- tr2:
-
- m.err = fmt.Errorf(errPrival, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- m.err = fmt.Errorf(errPri, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- goto st0
- tr7:
-
- m.err = fmt.Errorf(errTimestamp, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- goto st0
- tr37:
-
- m.err = fmt.Errorf(errHostname, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- goto st0
- tr41:
-
- m.err = fmt.Errorf(errTag, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- goto st0
- tr333:
-
- m.err = fmt.Errorf(errRFC3339, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- goto st0
- stCase0:
- st0:
- m.cs = 0
- goto _out
- st2:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof2
- }
- stCase2:
- switch (m.data)[(m.p)] {
- case 48:
- goto tr3
- case 49:
- goto tr4
- }
- if 50 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto tr5
- }
- goto tr2
- tr3:
-
- m.pb = m.p
-
- goto st3
- st3:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof3
- }
- stCase3:
-
- output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
- output.prioritySet = true
-
- if (m.data)[(m.p)] == 62 {
- goto st4
- }
- goto tr2
- st4:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof4
- }
- stCase4:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- switch _widec {
- case 65:
- goto tr8
- case 68:
- goto tr9
- case 70:
- goto tr10
- case 74:
- goto tr11
- case 77:
- goto tr12
- case 78:
- goto tr13
- case 79:
- goto tr14
- case 83:
- goto tr15
- }
- if 560 <= _widec && _widec <= 569 {
- goto tr16
- }
- goto tr7
- tr8:
-
- m.pb = m.p
-
- goto st5
- st5:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof5
- }
- stCase5:
- switch (m.data)[(m.p)] {
- case 112:
- goto st6
- case 117:
- goto st284
- }
- goto tr7
- st6:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof6
- }
- stCase6:
- if (m.data)[(m.p)] == 114 {
- goto st7
- }
- goto tr7
- st7:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof7
- }
- stCase7:
- if (m.data)[(m.p)] == 32 {
- goto st8
- }
- goto tr7
- st8:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof8
- }
- stCase8:
- switch (m.data)[(m.p)] {
- case 32:
- goto st9
- case 51:
- goto st283
- }
- if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 50 {
- goto st282
- }
- goto tr7
- st9:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof9
- }
- stCase9:
- if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st10
- }
- goto tr7
- st10:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof10
- }
- stCase10:
- if (m.data)[(m.p)] == 32 {
- goto st11
- }
- goto tr7
- st11:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof11
- }
- stCase11:
- if (m.data)[(m.p)] == 50 {
- goto st281
- }
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
- goto st12
- }
- goto tr7
- st12:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof12
- }
- stCase12:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st13
- }
- goto tr7
- st13:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof13
- }
- stCase13:
- if (m.data)[(m.p)] == 58 {
- goto st14
- }
- goto tr7
- st14:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof14
- }
- stCase14:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
- goto st15
- }
- goto tr7
- st15:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof15
- }
- stCase15:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st16
- }
- goto tr7
- st16:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof16
- }
- stCase16:
- if (m.data)[(m.p)] == 58 {
- goto st17
- }
- goto tr7
- st17:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof17
- }
- stCase17:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
- goto st18
- }
- goto tr7
- st18:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof18
- }
- stCase18:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st19
- }
- goto tr7
- st19:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof19
- }
- stCase19:
- if (m.data)[(m.p)] == 32 {
- goto tr35
- }
- goto st0
- tr35:
-
- if t, e := time.Parse(time.Stamp, string(m.text())); e != nil {
- m.err = fmt.Errorf("%s [col %d]", e, m.p)
- (m.p)--
-
- {
- goto st373
- }
- } else {
- if m.timezone != nil {
- t, _ = time.ParseInLocation(time.Stamp, string(m.text()), m.timezone)
- }
- output.timestamp = t.AddDate(m.yyyy, 0, 0)
- if m.loc != nil {
- output.timestamp = output.timestamp.In(m.loc)
- }
- output.timestampSet = true
- }
-
- goto st20
- tr341:
-
- if t, e := time.Parse(time.RFC3339, string(m.text())); e != nil {
- m.err = fmt.Errorf("%s [col %d]", e, m.p)
- (m.p)--
-
- {
- goto st373
- }
- } else {
- output.timestamp = t
- output.timestampSet = true
- }
-
- goto st20
- st20:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof20
- }
- stCase20:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto tr38
- }
- goto tr37
- tr38:
-
- m.pb = m.p
-
- goto st21
- st21:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof21
- }
- stCase21:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st27
- }
- goto tr37
- tr39:
-
- output.hostname = string(m.text())
-
- goto st22
- st22:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof22
- }
- stCase22:
- if (m.data)[(m.p)] == 127 {
- goto tr41
- }
- switch {
- case (m.data)[(m.p)] < 33:
- if (m.data)[(m.p)] <= 31 {
- goto tr41
- }
- case (m.data)[(m.p)] > 57:
- switch {
- case (m.data)[(m.p)] > 90:
- if 92 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto tr43
- }
- case (m.data)[(m.p)] >= 59:
- goto tr43
- }
- default:
- goto tr43
- }
- goto tr42
- tr42:
-
- m.pb = m.p
-
- goto st333
- st333:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof333
- }
- stCase333:
- if (m.data)[(m.p)] == 127 {
- goto st0
- }
- if (m.data)[(m.p)] <= 31 {
- goto st0
- }
- goto st333
- tr43:
-
- m.pb = m.p
-
- goto st334
- st334:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof334
- }
- stCase334:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st335
- }
- default:
- goto st0
- }
- goto st333
- st335:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof335
- }
- stCase335:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st336
- }
- default:
- goto st0
- }
- goto st333
- st336:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof336
- }
- stCase336:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st337
- }
- default:
- goto st0
- }
- goto st333
- st337:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof337
- }
- stCase337:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st338
- }
- default:
- goto st0
- }
- goto st333
- st338:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof338
- }
- stCase338:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st339
- }
- default:
- goto st0
- }
- goto st333
- st339:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof339
- }
- stCase339:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st340
- }
- default:
- goto st0
- }
- goto st333
- st340:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof340
- }
- stCase340:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st341
- }
- default:
- goto st0
- }
- goto st333
- st341:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof341
- }
- stCase341:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st342
- }
- default:
- goto st0
- }
- goto st333
- st342:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof342
- }
- stCase342:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st343
- }
- default:
- goto st0
- }
- goto st333
- st343:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof343
- }
- stCase343:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st344
- }
- default:
- goto st0
- }
- goto st333
- st344:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof344
- }
- stCase344:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st345
- }
- default:
- goto st0
- }
- goto st333
- st345:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof345
- }
- stCase345:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st346
- }
- default:
- goto st0
- }
- goto st333
- st346:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof346
- }
- stCase346:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st347
- }
- default:
- goto st0
- }
- goto st333
- st347:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof347
- }
- stCase347:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st348
- }
- default:
- goto st0
- }
- goto st333
- st348:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof348
- }
- stCase348:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st349
- }
- default:
- goto st0
- }
- goto st333
- st349:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof349
- }
- stCase349:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st350
- }
- default:
- goto st0
- }
- goto st333
- st350:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof350
- }
- stCase350:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st351
- }
- default:
- goto st0
- }
- goto st333
- st351:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof351
- }
- stCase351:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st352
- }
- default:
- goto st0
- }
- goto st333
- st352:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof352
- }
- stCase352:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st353
- }
- default:
- goto st0
- }
- goto st333
- st353:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof353
- }
- stCase353:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st354
- }
- default:
- goto st0
- }
- goto st333
- st354:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof354
- }
- stCase354:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st355
- }
- default:
- goto st0
- }
- goto st333
- st355:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof355
- }
- stCase355:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st356
- }
- default:
- goto st0
- }
- goto st333
- st356:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof356
- }
- stCase356:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st357
- }
- default:
- goto st0
- }
- goto st333
- st357:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof357
- }
- stCase357:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st358
- }
- default:
- goto st0
- }
- goto st333
- st358:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof358
- }
- stCase358:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st359
- }
- default:
- goto st0
- }
- goto st333
- st359:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof359
- }
- stCase359:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st360
- }
- default:
- goto st0
- }
- goto st333
- st360:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof360
- }
- stCase360:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st361
- }
- default:
- goto st0
- }
- goto st333
- st361:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof361
- }
- stCase361:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st362
- }
- default:
- goto st0
- }
- goto st333
- st362:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof362
- }
- stCase362:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st363
- }
- default:
- goto st0
- }
- goto st333
- st363:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof363
- }
- stCase363:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st364
- }
- default:
- goto st0
- }
- goto st333
- st364:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof364
- }
- stCase364:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- switch {
- case (m.data)[(m.p)] > 31:
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st365
- }
- default:
- goto st0
- }
- goto st333
- st365:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof365
- }
- stCase365:
- switch (m.data)[(m.p)] {
- case 58:
- goto tr347
- case 91:
- goto tr348
- case 127:
- goto st0
- }
- if (m.data)[(m.p)] <= 31 {
- goto st0
- }
- goto st333
- tr347:
-
- output.tag = string(m.text())
-
- goto st366
- st366:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof366
- }
- stCase366:
- switch (m.data)[(m.p)] {
- case 32:
- goto st367
- case 127:
- goto st0
- }
- if (m.data)[(m.p)] <= 31 {
- goto st0
- }
- goto st333
- st367:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof367
- }
- stCase367:
- if (m.data)[(m.p)] == 127 {
- goto st0
- }
- if (m.data)[(m.p)] <= 31 {
- goto st0
- }
- goto tr42
- tr348:
-
- output.tag = string(m.text())
-
- goto st368
- st368:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof368
- }
- stCase368:
- switch (m.data)[(m.p)] {
- case 93:
- goto tr381
- case 127:
- goto tr380
- }
- if (m.data)[(m.p)] <= 31 {
- goto tr380
- }
- goto tr48
- tr380:
-
- m.pb = m.p
-
- goto st23
- st23:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof23
- }
- stCase23:
- if (m.data)[(m.p)] == 93 {
- goto tr45
- }
- goto st23
- tr45:
-
- output.content = string(m.text())
-
- goto st24
- st24:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof24
- }
- stCase24:
- switch (m.data)[(m.p)] {
- case 58:
- goto st25
- case 93:
- goto tr45
- }
- goto st23
- st25:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof25
- }
- stCase25:
- switch (m.data)[(m.p)] {
- case 32:
- goto st26
- case 93:
- goto tr45
- }
- goto st23
- st26:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof26
- }
- stCase26:
- switch (m.data)[(m.p)] {
- case 93:
- goto tr49
- case 127:
- goto st23
- }
- if (m.data)[(m.p)] <= 31 {
- goto st23
- }
- goto tr48
- tr48:
-
- m.pb = m.p
-
- goto st369
- st369:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof369
- }
- stCase369:
- switch (m.data)[(m.p)] {
- case 93:
- goto tr383
- case 127:
- goto st23
- }
- if (m.data)[(m.p)] <= 31 {
- goto st23
- }
- goto st369
- tr383:
-
- output.content = string(m.text())
-
- goto st370
- tr49:
-
- output.content = string(m.text())
-
- m.pb = m.p
-
- goto st370
- tr381:
-
- m.pb = m.p
-
- output.content = string(m.text())
-
- goto st370
- st370:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof370
- }
- stCase370:
- switch (m.data)[(m.p)] {
- case 58:
- goto st371
- case 93:
- goto tr383
- case 127:
- goto st23
- }
- if (m.data)[(m.p)] <= 31 {
- goto st23
- }
- goto st369
- st371:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof371
- }
- stCase371:
- switch (m.data)[(m.p)] {
- case 32:
- goto st372
- case 93:
- goto tr383
- case 127:
- goto st23
- }
- if (m.data)[(m.p)] <= 31 {
- goto st23
- }
- goto st369
- st372:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof372
- }
- stCase372:
- switch (m.data)[(m.p)] {
- case 93:
- goto tr49
- case 127:
- goto st23
- }
- if (m.data)[(m.p)] <= 31 {
- goto st23
- }
- goto tr48
- st27:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof27
- }
- stCase27:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st28
- }
- goto tr37
- st28:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof28
- }
- stCase28:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st29
- }
- goto tr37
- st29:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof29
- }
- stCase29:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st30
- }
- goto tr37
- st30:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof30
- }
- stCase30:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st31
- }
- goto tr37
- st31:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof31
- }
- stCase31:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st32
- }
- goto tr37
- st32:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof32
- }
- stCase32:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st33
- }
- goto tr37
- st33:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof33
- }
- stCase33:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st34
- }
- goto tr37
- st34:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof34
- }
- stCase34:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st35
- }
- goto tr37
- st35:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof35
- }
- stCase35:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st36
- }
- goto tr37
- st36:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof36
- }
- stCase36:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st37
- }
- goto tr37
- st37:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof37
- }
- stCase37:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st38
- }
- goto tr37
- st38:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof38
- }
- stCase38:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st39
- }
- goto tr37
- st39:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof39
- }
- stCase39:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st40
- }
- goto tr37
- st40:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof40
- }
- stCase40:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st41
- }
- goto tr37
- st41:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof41
- }
- stCase41:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st42
- }
- goto tr37
- st42:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof42
- }
- stCase42:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st43
- }
- goto tr37
- st43:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof43
- }
- stCase43:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st44
- }
- goto tr37
- st44:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof44
- }
- stCase44:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st45
- }
- goto tr37
- st45:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof45
- }
- stCase45:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st46
- }
- goto tr37
- st46:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof46
- }
- stCase46:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st47
- }
- goto tr37
- st47:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof47
- }
- stCase47:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st48
- }
- goto tr37
- st48:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof48
- }
- stCase48:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st49
- }
- goto tr37
- st49:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof49
- }
- stCase49:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st50
- }
- goto tr37
- st50:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof50
- }
- stCase50:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st51
- }
- goto tr37
- st51:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof51
- }
- stCase51:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st52
- }
- goto tr37
- st52:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof52
- }
- stCase52:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st53
- }
- goto tr37
- st53:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof53
- }
- stCase53:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st54
- }
- goto tr37
- st54:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof54
- }
- stCase54:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st55
- }
- goto tr37
- st55:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof55
- }
- stCase55:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st56
- }
- goto tr37
- st56:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof56
- }
- stCase56:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st57
- }
- goto tr37
- st57:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof57
- }
- stCase57:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st58
- }
- goto tr37
- st58:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof58
- }
- stCase58:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st59
- }
- goto tr37
- st59:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof59
- }
- stCase59:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st60
- }
- goto tr37
- st60:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof60
- }
- stCase60:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st61
- }
- goto tr37
- st61:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof61
- }
- stCase61:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st62
- }
- goto tr37
- st62:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof62
- }
- stCase62:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st63
- }
- goto tr37
- st63:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof63
- }
- stCase63:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st64
- }
- goto tr37
- st64:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof64
- }
- stCase64:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st65
- }
- goto tr37
- st65:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof65
- }
- stCase65:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st66
- }
- goto tr37
- st66:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof66
- }
- stCase66:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st67
- }
- goto tr37
- st67:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof67
- }
- stCase67:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st68
- }
- goto tr37
- st68:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof68
- }
- stCase68:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st69
- }
- goto tr37
- st69:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof69
- }
- stCase69:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st70
- }
- goto tr37
- st70:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof70
- }
- stCase70:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st71
- }
- goto tr37
- st71:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof71
- }
- stCase71:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st72
- }
- goto tr37
- st72:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof72
- }
- stCase72:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st73
- }
- goto tr37
- st73:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof73
- }
- stCase73:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st74
- }
- goto tr37
- st74:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof74
- }
- stCase74:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st75
- }
- goto tr37
- st75:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof75
- }
- stCase75:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st76
- }
- goto tr37
- st76:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof76
- }
- stCase76:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st77
- }
- goto tr37
- st77:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof77
- }
- stCase77:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st78
- }
- goto tr37
- st78:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof78
- }
- stCase78:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st79
- }
- goto tr37
- st79:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof79
- }
- stCase79:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st80
- }
- goto tr37
- st80:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof80
- }
- stCase80:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st81
- }
- goto tr37
- st81:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof81
- }
- stCase81:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st82
- }
- goto tr37
- st82:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof82
- }
- stCase82:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st83
- }
- goto tr37
- st83:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof83
- }
- stCase83:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st84
- }
- goto tr37
- st84:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof84
- }
- stCase84:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st85
- }
- goto tr37
- st85:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof85
- }
- stCase85:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st86
- }
- goto tr37
- st86:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof86
- }
- stCase86:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st87
- }
- goto tr37
- st87:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof87
- }
- stCase87:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st88
- }
- goto tr37
- st88:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof88
- }
- stCase88:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st89
- }
- goto tr37
- st89:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof89
- }
- stCase89:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st90
- }
- goto tr37
- st90:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof90
- }
- stCase90:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st91
- }
- goto tr37
- st91:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof91
- }
- stCase91:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st92
- }
- goto tr37
- st92:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof92
- }
- stCase92:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st93
- }
- goto tr37
- st93:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof93
- }
- stCase93:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st94
- }
- goto tr37
- st94:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof94
- }
- stCase94:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st95
- }
- goto tr37
- st95:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof95
- }
- stCase95:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st96
- }
- goto tr37
- st96:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof96
- }
- stCase96:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st97
- }
- goto tr37
- st97:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof97
- }
- stCase97:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st98
- }
- goto tr37
- st98:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof98
- }
- stCase98:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st99
- }
- goto tr37
- st99:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof99
- }
- stCase99:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st100
- }
- goto tr37
- st100:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof100
- }
- stCase100:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st101
- }
- goto tr37
- st101:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof101
- }
- stCase101:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st102
- }
- goto tr37
- st102:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof102
- }
- stCase102:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st103
- }
- goto tr37
- st103:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof103
- }
- stCase103:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st104
- }
- goto tr37
- st104:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof104
- }
- stCase104:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st105
- }
- goto tr37
- st105:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof105
- }
- stCase105:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st106
- }
- goto tr37
- st106:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof106
- }
- stCase106:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st107
- }
- goto tr37
- st107:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof107
- }
- stCase107:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st108
- }
- goto tr37
- st108:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof108
- }
- stCase108:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st109
- }
- goto tr37
- st109:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof109
- }
- stCase109:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st110
- }
- goto tr37
- st110:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof110
- }
- stCase110:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st111
- }
- goto tr37
- st111:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof111
- }
- stCase111:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st112
- }
- goto tr37
- st112:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof112
- }
- stCase112:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st113
- }
- goto tr37
- st113:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof113
- }
- stCase113:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st114
- }
- goto tr37
- st114:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof114
- }
- stCase114:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st115
- }
- goto tr37
- st115:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof115
- }
- stCase115:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st116
- }
- goto tr37
- st116:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof116
- }
- stCase116:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st117
- }
- goto tr37
- st117:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof117
- }
- stCase117:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st118
- }
- goto tr37
- st118:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof118
- }
- stCase118:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st119
- }
- goto tr37
- st119:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof119
- }
- stCase119:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st120
- }
- goto tr37
- st120:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof120
- }
- stCase120:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st121
- }
- goto tr37
- st121:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof121
- }
- stCase121:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st122
- }
- goto tr37
- st122:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof122
- }
- stCase122:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st123
- }
- goto tr37
- st123:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof123
- }
- stCase123:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st124
- }
- goto tr37
- st124:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof124
- }
- stCase124:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st125
- }
- goto tr37
- st125:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof125
- }
- stCase125:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st126
- }
- goto tr37
- st126:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof126
- }
- stCase126:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st127
- }
- goto tr37
- st127:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof127
- }
- stCase127:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st128
- }
- goto tr37
- st128:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof128
- }
- stCase128:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st129
- }
- goto tr37
- st129:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof129
- }
- stCase129:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st130
- }
- goto tr37
- st130:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof130
- }
- stCase130:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st131
- }
- goto tr37
- st131:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof131
- }
- stCase131:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st132
- }
- goto tr37
- st132:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof132
- }
- stCase132:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st133
- }
- goto tr37
- st133:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof133
- }
- stCase133:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st134
- }
- goto tr37
- st134:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof134
- }
- stCase134:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st135
- }
- goto tr37
- st135:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof135
- }
- stCase135:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st136
- }
- goto tr37
- st136:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof136
- }
- stCase136:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st137
- }
- goto tr37
- st137:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof137
- }
- stCase137:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st138
- }
- goto tr37
- st138:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof138
- }
- stCase138:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st139
- }
- goto tr37
- st139:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof139
- }
- stCase139:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st140
- }
- goto tr37
- st140:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof140
- }
- stCase140:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st141
- }
- goto tr37
- st141:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof141
- }
- stCase141:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st142
- }
- goto tr37
- st142:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof142
- }
- stCase142:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st143
- }
- goto tr37
- st143:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof143
- }
- stCase143:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st144
- }
- goto tr37
- st144:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof144
- }
- stCase144:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st145
- }
- goto tr37
- st145:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof145
- }
- stCase145:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st146
- }
- goto tr37
- st146:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof146
- }
- stCase146:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st147
- }
- goto tr37
- st147:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof147
- }
- stCase147:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st148
- }
- goto tr37
- st148:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof148
- }
- stCase148:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st149
- }
- goto tr37
- st149:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof149
- }
- stCase149:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st150
- }
- goto tr37
- st150:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof150
- }
- stCase150:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st151
- }
- goto tr37
- st151:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof151
- }
- stCase151:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st152
- }
- goto tr37
- st152:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof152
- }
- stCase152:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st153
- }
- goto tr37
- st153:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof153
- }
- stCase153:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st154
- }
- goto tr37
- st154:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof154
- }
- stCase154:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st155
- }
- goto tr37
- st155:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof155
- }
- stCase155:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st156
- }
- goto tr37
- st156:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof156
- }
- stCase156:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st157
- }
- goto tr37
- st157:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof157
- }
- stCase157:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st158
- }
- goto tr37
- st158:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof158
- }
- stCase158:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st159
- }
- goto tr37
- st159:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof159
- }
- stCase159:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st160
- }
- goto tr37
- st160:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof160
- }
- stCase160:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st161
- }
- goto tr37
- st161:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof161
- }
- stCase161:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st162
- }
- goto tr37
- st162:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof162
- }
- stCase162:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st163
- }
- goto tr37
- st163:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof163
- }
- stCase163:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st164
- }
- goto tr37
- st164:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof164
- }
- stCase164:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st165
- }
- goto tr37
- st165:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof165
- }
- stCase165:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st166
- }
- goto tr37
- st166:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof166
- }
- stCase166:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st167
- }
- goto tr37
- st167:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof167
- }
- stCase167:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st168
- }
- goto tr37
- st168:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof168
- }
- stCase168:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st169
- }
- goto tr37
- st169:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof169
- }
- stCase169:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st170
- }
- goto tr37
- st170:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof170
- }
- stCase170:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st171
- }
- goto tr37
- st171:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof171
- }
- stCase171:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st172
- }
- goto tr37
- st172:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof172
- }
- stCase172:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st173
- }
- goto tr37
- st173:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof173
- }
- stCase173:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st174
- }
- goto tr37
- st174:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof174
- }
- stCase174:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st175
- }
- goto tr37
- st175:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof175
- }
- stCase175:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st176
- }
- goto tr37
- st176:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof176
- }
- stCase176:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st177
- }
- goto tr37
- st177:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof177
- }
- stCase177:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st178
- }
- goto tr37
- st178:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof178
- }
- stCase178:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st179
- }
- goto tr37
- st179:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof179
- }
- stCase179:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st180
- }
- goto tr37
- st180:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof180
- }
- stCase180:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st181
- }
- goto tr37
- st181:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof181
- }
- stCase181:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st182
- }
- goto tr37
- st182:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof182
- }
- stCase182:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st183
- }
- goto tr37
- st183:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof183
- }
- stCase183:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st184
- }
- goto tr37
- st184:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof184
- }
- stCase184:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st185
- }
- goto tr37
- st185:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof185
- }
- stCase185:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st186
- }
- goto tr37
- st186:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof186
- }
- stCase186:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st187
- }
- goto tr37
- st187:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof187
- }
- stCase187:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st188
- }
- goto tr37
- st188:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof188
- }
- stCase188:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st189
- }
- goto tr37
- st189:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof189
- }
- stCase189:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st190
- }
- goto tr37
- st190:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof190
- }
- stCase190:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st191
- }
- goto tr37
- st191:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof191
- }
- stCase191:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st192
- }
- goto tr37
- st192:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof192
- }
- stCase192:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st193
- }
- goto tr37
- st193:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof193
- }
- stCase193:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st194
- }
- goto tr37
- st194:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof194
- }
- stCase194:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st195
- }
- goto tr37
- st195:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof195
- }
- stCase195:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st196
- }
- goto tr37
- st196:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof196
- }
- stCase196:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st197
- }
- goto tr37
- st197:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof197
- }
- stCase197:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st198
- }
- goto tr37
- st198:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof198
- }
- stCase198:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st199
- }
- goto tr37
- st199:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof199
- }
- stCase199:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st200
- }
- goto tr37
- st200:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof200
- }
- stCase200:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st201
- }
- goto tr37
- st201:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof201
- }
- stCase201:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st202
- }
- goto tr37
- st202:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof202
- }
- stCase202:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st203
- }
- goto tr37
- st203:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof203
- }
- stCase203:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st204
- }
- goto tr37
- st204:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof204
- }
- stCase204:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st205
- }
- goto tr37
- st205:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof205
- }
- stCase205:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st206
- }
- goto tr37
- st206:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof206
- }
- stCase206:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st207
- }
- goto tr37
- st207:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof207
- }
- stCase207:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st208
- }
- goto tr37
- st208:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof208
- }
- stCase208:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st209
- }
- goto tr37
- st209:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof209
- }
- stCase209:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st210
- }
- goto tr37
- st210:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof210
- }
- stCase210:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st211
- }
- goto tr37
- st211:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof211
- }
- stCase211:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st212
- }
- goto tr37
- st212:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof212
- }
- stCase212:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st213
- }
- goto tr37
- st213:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof213
- }
- stCase213:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st214
- }
- goto tr37
- st214:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof214
- }
- stCase214:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st215
- }
- goto tr37
- st215:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof215
- }
- stCase215:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st216
- }
- goto tr37
- st216:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof216
- }
- stCase216:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st217
- }
- goto tr37
- st217:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof217
- }
- stCase217:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st218
- }
- goto tr37
- st218:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof218
- }
- stCase218:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st219
- }
- goto tr37
- st219:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof219
- }
- stCase219:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st220
- }
- goto tr37
- st220:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof220
- }
- stCase220:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st221
- }
- goto tr37
- st221:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof221
- }
- stCase221:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st222
- }
- goto tr37
- st222:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof222
- }
- stCase222:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st223
- }
- goto tr37
- st223:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof223
- }
- stCase223:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st224
- }
- goto tr37
- st224:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof224
- }
- stCase224:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st225
- }
- goto tr37
- st225:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof225
- }
- stCase225:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st226
- }
- goto tr37
- st226:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof226
- }
- stCase226:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st227
- }
- goto tr37
- st227:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof227
- }
- stCase227:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st228
- }
- goto tr37
- st228:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof228
- }
- stCase228:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st229
- }
- goto tr37
- st229:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof229
- }
- stCase229:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st230
- }
- goto tr37
- st230:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof230
- }
- stCase230:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st231
- }
- goto tr37
- st231:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof231
- }
- stCase231:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st232
- }
- goto tr37
- st232:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof232
- }
- stCase232:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st233
- }
- goto tr37
- st233:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof233
- }
- stCase233:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st234
- }
- goto tr37
- st234:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof234
- }
- stCase234:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st235
- }
- goto tr37
- st235:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof235
- }
- stCase235:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st236
- }
- goto tr37
- st236:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof236
- }
- stCase236:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st237
- }
- goto tr37
- st237:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof237
- }
- stCase237:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st238
- }
- goto tr37
- st238:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof238
- }
- stCase238:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st239
- }
- goto tr37
- st239:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof239
- }
- stCase239:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st240
- }
- goto tr37
- st240:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof240
- }
- stCase240:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st241
- }
- goto tr37
- st241:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof241
- }
- stCase241:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st242
- }
- goto tr37
- st242:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof242
- }
- stCase242:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st243
- }
- goto tr37
- st243:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof243
- }
- stCase243:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st244
- }
- goto tr37
- st244:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof244
- }
- stCase244:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st245
- }
- goto tr37
- st245:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof245
- }
- stCase245:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st246
- }
- goto tr37
- st246:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof246
- }
- stCase246:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st247
- }
- goto tr37
- st247:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof247
- }
- stCase247:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st248
- }
- goto tr37
- st248:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof248
- }
- stCase248:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st249
- }
- goto tr37
- st249:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof249
- }
- stCase249:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st250
- }
- goto tr37
- st250:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof250
- }
- stCase250:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st251
- }
- goto tr37
- st251:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof251
- }
- stCase251:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st252
- }
- goto tr37
- st252:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof252
- }
- stCase252:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st253
- }
- goto tr37
- st253:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof253
- }
- stCase253:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st254
- }
- goto tr37
- st254:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof254
- }
- stCase254:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st255
- }
- goto tr37
- st255:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof255
- }
- stCase255:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st256
- }
- goto tr37
- st256:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof256
- }
- stCase256:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st257
- }
- goto tr37
- st257:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof257
- }
- stCase257:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st258
- }
- goto tr37
- st258:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof258
- }
- stCase258:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st259
- }
- goto tr37
- st259:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof259
- }
- stCase259:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st260
- }
- goto tr37
- st260:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof260
- }
- stCase260:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st261
- }
- goto tr37
- st261:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof261
- }
- stCase261:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st262
- }
- goto tr37
- st262:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof262
- }
- stCase262:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st263
- }
- goto tr37
- st263:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof263
- }
- stCase263:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st264
- }
- goto tr37
- st264:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof264
- }
- stCase264:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st265
- }
- goto tr37
- st265:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof265
- }
- stCase265:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st266
- }
- goto tr37
- st266:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof266
- }
- stCase266:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st267
- }
- goto tr37
- st267:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof267
- }
- stCase267:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st268
- }
- goto tr37
- st268:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof268
- }
- stCase268:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st269
- }
- goto tr37
- st269:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof269
- }
- stCase269:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st270
- }
- goto tr37
- st270:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof270
- }
- stCase270:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st271
- }
- goto tr37
- st271:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof271
- }
- stCase271:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st272
- }
- goto tr37
- st272:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof272
- }
- stCase272:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st273
- }
- goto tr37
- st273:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof273
- }
- stCase273:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st274
- }
- goto tr37
- st274:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof274
- }
- stCase274:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st275
- }
- goto tr37
- st275:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof275
- }
- stCase275:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st276
- }
- goto tr37
- st276:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof276
- }
- stCase276:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st277
- }
- goto tr37
- st277:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof277
- }
- stCase277:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st278
- }
- goto tr37
- st278:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof278
- }
- stCase278:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st279
- }
- goto tr37
- st279:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof279
- }
- stCase279:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
- goto st280
- }
- goto tr37
- st280:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof280
- }
- stCase280:
- if (m.data)[(m.p)] == 32 {
- goto tr39
- }
- goto tr37
- st281:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof281
- }
- stCase281:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 51 {
- goto st13
- }
- goto tr7
- st282:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof282
- }
- stCase282:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st10
- }
- goto tr7
- st283:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof283
- }
- stCase283:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
- goto st10
- }
- goto tr7
- st284:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof284
- }
- stCase284:
- if (m.data)[(m.p)] == 103 {
- goto st7
- }
- goto tr7
- tr9:
-
- m.pb = m.p
-
- goto st285
- st285:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof285
- }
- stCase285:
- if (m.data)[(m.p)] == 101 {
- goto st286
- }
- goto tr7
- st286:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof286
- }
- stCase286:
- if (m.data)[(m.p)] == 99 {
- goto st7
- }
- goto tr7
- tr10:
-
- m.pb = m.p
-
- goto st287
- st287:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof287
- }
- stCase287:
- if (m.data)[(m.p)] == 101 {
- goto st288
- }
- goto tr7
- st288:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof288
- }
- stCase288:
- if (m.data)[(m.p)] == 98 {
- goto st7
- }
- goto tr7
- tr11:
-
- m.pb = m.p
-
- goto st289
- st289:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof289
- }
- stCase289:
- switch (m.data)[(m.p)] {
- case 97:
- goto st290
- case 117:
- goto st291
- }
- goto tr7
- st290:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof290
- }
- stCase290:
- if (m.data)[(m.p)] == 110 {
- goto st7
- }
- goto tr7
- st291:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof291
- }
- stCase291:
- switch (m.data)[(m.p)] {
- case 108:
- goto st7
- case 110:
- goto st7
- }
- goto tr7
- tr12:
-
- m.pb = m.p
-
- goto st292
- st292:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof292
- }
- stCase292:
- if (m.data)[(m.p)] == 97 {
- goto st293
- }
- goto tr7
- st293:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof293
- }
- stCase293:
- switch (m.data)[(m.p)] {
- case 114:
- goto st7
- case 121:
- goto st7
- }
- goto tr7
- tr13:
-
- m.pb = m.p
-
- goto st294
- st294:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof294
- }
- stCase294:
- if (m.data)[(m.p)] == 111 {
- goto st295
- }
- goto tr7
- st295:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof295
- }
- stCase295:
- if (m.data)[(m.p)] == 118 {
- goto st7
- }
- goto tr7
- tr14:
-
- m.pb = m.p
-
- goto st296
- st296:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof296
- }
- stCase296:
- if (m.data)[(m.p)] == 99 {
- goto st297
- }
- goto tr7
- st297:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof297
- }
- stCase297:
- if (m.data)[(m.p)] == 116 {
- goto st7
- }
- goto tr7
- tr15:
-
- m.pb = m.p
-
- goto st298
- st298:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof298
- }
- stCase298:
- if (m.data)[(m.p)] == 101 {
- goto st299
- }
- goto tr7
- st299:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof299
- }
- stCase299:
- if (m.data)[(m.p)] == 112 {
- goto st7
- }
- goto tr7
- tr16:
-
- m.pb = m.p
-
- goto st300
- st300:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof300
- }
- stCase300:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 569 {
- goto st301
- }
- goto st0
- st301:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof301
- }
- stCase301:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 569 {
- goto st302
- }
- goto st0
- st302:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof302
- }
- stCase302:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 569 {
- goto st303
- }
- goto st0
- st303:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof303
- }
- stCase303:
- _widec = int16((m.data)[(m.p)])
- if 45 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 45 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if _widec == 557 {
- goto st304
- }
- goto st0
- st304:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof304
- }
- stCase304:
- _widec = int16((m.data)[(m.p)])
- switch {
- case (m.data)[(m.p)] > 48:
- if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- case (m.data)[(m.p)] >= 48:
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- switch _widec {
- case 560:
- goto st305
- case 561:
- goto st329
- }
- goto st0
- st305:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof305
- }
- stCase305:
- _widec = int16((m.data)[(m.p)])
- if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 561 <= _widec && _widec <= 569 {
- goto st306
- }
- goto st0
- st306:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof306
- }
- stCase306:
- _widec = int16((m.data)[(m.p)])
- if 45 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 45 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if _widec == 557 {
- goto st307
- }
- goto st0
- st307:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof307
- }
- stCase307:
- _widec = int16((m.data)[(m.p)])
- switch {
- case (m.data)[(m.p)] < 49:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 48 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- case (m.data)[(m.p)] > 50:
- if 51 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 51 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- default:
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- switch _widec {
- case 560:
- goto st308
- case 563:
- goto st328
- }
- if 561 <= _widec && _widec <= 562 {
- goto st327
- }
- goto st0
- st308:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof308
- }
- stCase308:
- _widec = int16((m.data)[(m.p)])
- if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 561 <= _widec && _widec <= 569 {
- goto st309
- }
- goto st0
- st309:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof309
- }
- stCase309:
- _widec = int16((m.data)[(m.p)])
- if 84 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 84 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if _widec == 596 {
- goto st310
- }
- goto st0
- st310:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof310
- }
- stCase310:
- _widec = int16((m.data)[(m.p)])
- switch {
- case (m.data)[(m.p)] > 49:
- if 50 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 50 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- case (m.data)[(m.p)] >= 48:
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if _widec == 562 {
- goto st326
- }
- if 560 <= _widec && _widec <= 561 {
- goto st311
- }
- goto st0
- st311:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof311
- }
- stCase311:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 569 {
- goto st312
- }
- goto st0
- st312:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof312
- }
- stCase312:
- _widec = int16((m.data)[(m.p)])
- if 58 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 58 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if _widec == 570 {
- goto st313
- }
- goto st0
- st313:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof313
- }
- stCase313:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 565 {
- goto st314
- }
- goto st0
- st314:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof314
- }
- stCase314:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 569 {
- goto st315
- }
- goto st0
- st315:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof315
- }
- stCase315:
- _widec = int16((m.data)[(m.p)])
- if 58 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 58 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if _widec == 570 {
- goto st316
- }
- goto st0
- st316:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof316
- }
- stCase316:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 565 {
- goto st317
- }
- goto st0
- st317:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof317
- }
- stCase317:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 569 {
- goto st318
- }
- goto st0
- st318:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof318
- }
- stCase318:
- _widec = int16((m.data)[(m.p)])
- switch {
- case (m.data)[(m.p)] < 45:
- if 43 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 43 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- case (m.data)[(m.p)] > 45:
- if 90 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 90 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- default:
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- switch _widec {
- case 555:
- goto st319
- case 557:
- goto st319
- case 602:
- goto st324
- }
- goto tr333
- st319:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof319
- }
- stCase319:
- _widec = int16((m.data)[(m.p)])
- switch {
- case (m.data)[(m.p)] > 49:
- if 50 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 50 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- case (m.data)[(m.p)] >= 48:
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if _widec == 562 {
- goto st325
- }
- if 560 <= _widec && _widec <= 561 {
- goto st320
- }
- goto tr333
- st320:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof320
- }
- stCase320:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 569 {
- goto st321
- }
- goto tr333
- st321:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof321
- }
- stCase321:
- _widec = int16((m.data)[(m.p)])
- if 58 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 58 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if _widec == 570 {
- goto st322
- }
- goto tr333
- st322:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof322
- }
- stCase322:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 565 {
- goto st323
- }
- goto tr333
- st323:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof323
- }
- stCase323:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 569 {
- goto st324
- }
- goto tr333
- st324:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof324
- }
- stCase324:
- if (m.data)[(m.p)] == 32 {
- goto tr341
- }
- goto st0
- st325:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof325
- }
- stCase325:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 51 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 563 {
- goto st321
- }
- goto tr333
- st326:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof326
- }
- stCase326:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 51 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 563 {
- goto st312
- }
- goto st0
- st327:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof327
- }
- stCase327:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 569 {
- goto st309
- }
- goto st0
- st328:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof328
- }
- stCase328:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 561 {
- goto st309
- }
- goto st0
- st329:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof329
- }
- stCase329:
- _widec = int16((m.data)[(m.p)])
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 50 {
- _widec = 256 + (int16((m.data)[(m.p)]) - 0)
- if m.rfc3339 {
- _widec += 256
- }
- }
- if 560 <= _widec && _widec <= 562 {
- goto st306
- }
- goto st0
- tr4:
-
- m.pb = m.p
-
- goto st330
- st330:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof330
- }
- stCase330:
-
- output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
- output.prioritySet = true
-
- switch (m.data)[(m.p)] {
- case 57:
- goto st332
- case 62:
- goto st4
- }
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 56 {
- goto st331
- }
- goto tr2
- tr5:
-
- m.pb = m.p
-
- goto st331
- st331:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof331
- }
- stCase331:
-
- output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
- output.prioritySet = true
-
- if (m.data)[(m.p)] == 62 {
- goto st4
- }
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st3
- }
- goto tr2
- st332:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof332
- }
- stCase332:
-
- output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
- output.prioritySet = true
-
- if (m.data)[(m.p)] == 62 {
- goto st4
- }
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
- goto st3
- }
- goto tr2
- st373:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof373
- }
- stCase373:
- switch (m.data)[(m.p)] {
- case 10:
- goto st0
- case 13:
- goto st0
- }
- goto st373
- stOut:
- _testEof2:
- m.cs = 2
- goto _testEof
- _testEof3:
- m.cs = 3
- goto _testEof
- _testEof4:
- m.cs = 4
- goto _testEof
- _testEof5:
- m.cs = 5
- goto _testEof
- _testEof6:
- m.cs = 6
- goto _testEof
- _testEof7:
- m.cs = 7
- goto _testEof
- _testEof8:
- m.cs = 8
- goto _testEof
- _testEof9:
- m.cs = 9
- goto _testEof
- _testEof10:
- m.cs = 10
- goto _testEof
- _testEof11:
- m.cs = 11
- goto _testEof
- _testEof12:
- m.cs = 12
- goto _testEof
- _testEof13:
- m.cs = 13
- goto _testEof
- _testEof14:
- m.cs = 14
- goto _testEof
- _testEof15:
- m.cs = 15
- goto _testEof
- _testEof16:
- m.cs = 16
- goto _testEof
- _testEof17:
- m.cs = 17
- goto _testEof
- _testEof18:
- m.cs = 18
- goto _testEof
- _testEof19:
- m.cs = 19
- goto _testEof
- _testEof20:
- m.cs = 20
- goto _testEof
- _testEof21:
- m.cs = 21
- goto _testEof
- _testEof22:
- m.cs = 22
- goto _testEof
- _testEof333:
- m.cs = 333
- goto _testEof
- _testEof334:
- m.cs = 334
- goto _testEof
- _testEof335:
- m.cs = 335
- goto _testEof
- _testEof336:
- m.cs = 336
- goto _testEof
- _testEof337:
- m.cs = 337
- goto _testEof
- _testEof338:
- m.cs = 338
- goto _testEof
- _testEof339:
- m.cs = 339
- goto _testEof
- _testEof340:
- m.cs = 340
- goto _testEof
- _testEof341:
- m.cs = 341
- goto _testEof
- _testEof342:
- m.cs = 342
- goto _testEof
- _testEof343:
- m.cs = 343
- goto _testEof
- _testEof344:
- m.cs = 344
- goto _testEof
- _testEof345:
- m.cs = 345
- goto _testEof
- _testEof346:
- m.cs = 346
- goto _testEof
- _testEof347:
- m.cs = 347
- goto _testEof
- _testEof348:
- m.cs = 348
- goto _testEof
- _testEof349:
- m.cs = 349
- goto _testEof
- _testEof350:
- m.cs = 350
- goto _testEof
- _testEof351:
- m.cs = 351
- goto _testEof
- _testEof352:
- m.cs = 352
- goto _testEof
- _testEof353:
- m.cs = 353
- goto _testEof
- _testEof354:
- m.cs = 354
- goto _testEof
- _testEof355:
- m.cs = 355
- goto _testEof
- _testEof356:
- m.cs = 356
- goto _testEof
- _testEof357:
- m.cs = 357
- goto _testEof
- _testEof358:
- m.cs = 358
- goto _testEof
- _testEof359:
- m.cs = 359
- goto _testEof
- _testEof360:
- m.cs = 360
- goto _testEof
- _testEof361:
- m.cs = 361
- goto _testEof
- _testEof362:
- m.cs = 362
- goto _testEof
- _testEof363:
- m.cs = 363
- goto _testEof
- _testEof364:
- m.cs = 364
- goto _testEof
- _testEof365:
- m.cs = 365
- goto _testEof
- _testEof366:
- m.cs = 366
- goto _testEof
- _testEof367:
- m.cs = 367
- goto _testEof
- _testEof368:
- m.cs = 368
- goto _testEof
- _testEof23:
- m.cs = 23
- goto _testEof
- _testEof24:
- m.cs = 24
- goto _testEof
- _testEof25:
- m.cs = 25
- goto _testEof
- _testEof26:
- m.cs = 26
- goto _testEof
- _testEof369:
- m.cs = 369
- goto _testEof
- _testEof370:
- m.cs = 370
- goto _testEof
- _testEof371:
- m.cs = 371
- goto _testEof
- _testEof372:
- m.cs = 372
- goto _testEof
- _testEof27:
- m.cs = 27
- goto _testEof
- _testEof28:
- m.cs = 28
- goto _testEof
- _testEof29:
- m.cs = 29
- goto _testEof
- _testEof30:
- m.cs = 30
- goto _testEof
- _testEof31:
- m.cs = 31
- goto _testEof
- _testEof32:
- m.cs = 32
- goto _testEof
- _testEof33:
- m.cs = 33
- goto _testEof
- _testEof34:
- m.cs = 34
- goto _testEof
- _testEof35:
- m.cs = 35
- goto _testEof
- _testEof36:
- m.cs = 36
- goto _testEof
- _testEof37:
- m.cs = 37
- goto _testEof
- _testEof38:
- m.cs = 38
- goto _testEof
- _testEof39:
- m.cs = 39
- goto _testEof
- _testEof40:
- m.cs = 40
- goto _testEof
- _testEof41:
- m.cs = 41
- goto _testEof
- _testEof42:
- m.cs = 42
- goto _testEof
- _testEof43:
- m.cs = 43
- goto _testEof
- _testEof44:
- m.cs = 44
- goto _testEof
- _testEof45:
- m.cs = 45
- goto _testEof
- _testEof46:
- m.cs = 46
- goto _testEof
- _testEof47:
- m.cs = 47
- goto _testEof
- _testEof48:
- m.cs = 48
- goto _testEof
- _testEof49:
- m.cs = 49
- goto _testEof
- _testEof50:
- m.cs = 50
- goto _testEof
- _testEof51:
- m.cs = 51
- goto _testEof
- _testEof52:
- m.cs = 52
- goto _testEof
- _testEof53:
- m.cs = 53
- goto _testEof
- _testEof54:
- m.cs = 54
- goto _testEof
- _testEof55:
- m.cs = 55
- goto _testEof
- _testEof56:
- m.cs = 56
- goto _testEof
- _testEof57:
- m.cs = 57
- goto _testEof
- _testEof58:
- m.cs = 58
- goto _testEof
- _testEof59:
- m.cs = 59
- goto _testEof
- _testEof60:
- m.cs = 60
- goto _testEof
- _testEof61:
- m.cs = 61
- goto _testEof
- _testEof62:
- m.cs = 62
- goto _testEof
- _testEof63:
- m.cs = 63
- goto _testEof
- _testEof64:
- m.cs = 64
- goto _testEof
- _testEof65:
- m.cs = 65
- goto _testEof
- _testEof66:
- m.cs = 66
- goto _testEof
- _testEof67:
- m.cs = 67
- goto _testEof
- _testEof68:
- m.cs = 68
- goto _testEof
- _testEof69:
- m.cs = 69
- goto _testEof
- _testEof70:
- m.cs = 70
- goto _testEof
- _testEof71:
- m.cs = 71
- goto _testEof
- _testEof72:
- m.cs = 72
- goto _testEof
- _testEof73:
- m.cs = 73
- goto _testEof
- _testEof74:
- m.cs = 74
- goto _testEof
- _testEof75:
- m.cs = 75
- goto _testEof
- _testEof76:
- m.cs = 76
- goto _testEof
- _testEof77:
- m.cs = 77
- goto _testEof
- _testEof78:
- m.cs = 78
- goto _testEof
- _testEof79:
- m.cs = 79
- goto _testEof
- _testEof80:
- m.cs = 80
- goto _testEof
- _testEof81:
- m.cs = 81
- goto _testEof
- _testEof82:
- m.cs = 82
- goto _testEof
- _testEof83:
- m.cs = 83
- goto _testEof
- _testEof84:
- m.cs = 84
- goto _testEof
- _testEof85:
- m.cs = 85
- goto _testEof
- _testEof86:
- m.cs = 86
- goto _testEof
- _testEof87:
- m.cs = 87
- goto _testEof
- _testEof88:
- m.cs = 88
- goto _testEof
- _testEof89:
- m.cs = 89
- goto _testEof
- _testEof90:
- m.cs = 90
- goto _testEof
- _testEof91:
- m.cs = 91
- goto _testEof
- _testEof92:
- m.cs = 92
- goto _testEof
- _testEof93:
- m.cs = 93
- goto _testEof
- _testEof94:
- m.cs = 94
- goto _testEof
- _testEof95:
- m.cs = 95
- goto _testEof
- _testEof96:
- m.cs = 96
- goto _testEof
- _testEof97:
- m.cs = 97
- goto _testEof
- _testEof98:
- m.cs = 98
- goto _testEof
- _testEof99:
- m.cs = 99
- goto _testEof
- _testEof100:
- m.cs = 100
- goto _testEof
- _testEof101:
- m.cs = 101
- goto _testEof
- _testEof102:
- m.cs = 102
- goto _testEof
- _testEof103:
- m.cs = 103
- goto _testEof
- _testEof104:
- m.cs = 104
- goto _testEof
- _testEof105:
- m.cs = 105
- goto _testEof
- _testEof106:
- m.cs = 106
- goto _testEof
- _testEof107:
- m.cs = 107
- goto _testEof
- _testEof108:
- m.cs = 108
- goto _testEof
- _testEof109:
- m.cs = 109
- goto _testEof
- _testEof110:
- m.cs = 110
- goto _testEof
- _testEof111:
- m.cs = 111
- goto _testEof
- _testEof112:
- m.cs = 112
- goto _testEof
- _testEof113:
- m.cs = 113
- goto _testEof
- _testEof114:
- m.cs = 114
- goto _testEof
- _testEof115:
- m.cs = 115
- goto _testEof
- _testEof116:
- m.cs = 116
- goto _testEof
- _testEof117:
- m.cs = 117
- goto _testEof
- _testEof118:
- m.cs = 118
- goto _testEof
- _testEof119:
- m.cs = 119
- goto _testEof
- _testEof120:
- m.cs = 120
- goto _testEof
- _testEof121:
- m.cs = 121
- goto _testEof
- _testEof122:
- m.cs = 122
- goto _testEof
- _testEof123:
- m.cs = 123
- goto _testEof
- _testEof124:
- m.cs = 124
- goto _testEof
- _testEof125:
- m.cs = 125
- goto _testEof
- _testEof126:
- m.cs = 126
- goto _testEof
- _testEof127:
- m.cs = 127
- goto _testEof
- _testEof128:
- m.cs = 128
- goto _testEof
- _testEof129:
- m.cs = 129
- goto _testEof
- _testEof130:
- m.cs = 130
- goto _testEof
- _testEof131:
- m.cs = 131
- goto _testEof
- _testEof132:
- m.cs = 132
- goto _testEof
- _testEof133:
- m.cs = 133
- goto _testEof
- _testEof134:
- m.cs = 134
- goto _testEof
- _testEof135:
- m.cs = 135
- goto _testEof
- _testEof136:
- m.cs = 136
- goto _testEof
- _testEof137:
- m.cs = 137
- goto _testEof
- _testEof138:
- m.cs = 138
- goto _testEof
- _testEof139:
- m.cs = 139
- goto _testEof
- _testEof140:
- m.cs = 140
- goto _testEof
- _testEof141:
- m.cs = 141
- goto _testEof
- _testEof142:
- m.cs = 142
- goto _testEof
- _testEof143:
- m.cs = 143
- goto _testEof
- _testEof144:
- m.cs = 144
- goto _testEof
- _testEof145:
- m.cs = 145
- goto _testEof
- _testEof146:
- m.cs = 146
- goto _testEof
- _testEof147:
- m.cs = 147
- goto _testEof
- _testEof148:
- m.cs = 148
- goto _testEof
- _testEof149:
- m.cs = 149
- goto _testEof
- _testEof150:
- m.cs = 150
- goto _testEof
- _testEof151:
- m.cs = 151
- goto _testEof
- _testEof152:
- m.cs = 152
- goto _testEof
- _testEof153:
- m.cs = 153
- goto _testEof
- _testEof154:
- m.cs = 154
- goto _testEof
- _testEof155:
- m.cs = 155
- goto _testEof
- _testEof156:
- m.cs = 156
- goto _testEof
- _testEof157:
- m.cs = 157
- goto _testEof
- _testEof158:
- m.cs = 158
- goto _testEof
- _testEof159:
- m.cs = 159
- goto _testEof
- _testEof160:
- m.cs = 160
- goto _testEof
- _testEof161:
- m.cs = 161
- goto _testEof
- _testEof162:
- m.cs = 162
- goto _testEof
- _testEof163:
- m.cs = 163
- goto _testEof
- _testEof164:
- m.cs = 164
- goto _testEof
- _testEof165:
- m.cs = 165
- goto _testEof
- _testEof166:
- m.cs = 166
- goto _testEof
- _testEof167:
- m.cs = 167
- goto _testEof
- _testEof168:
- m.cs = 168
- goto _testEof
- _testEof169:
- m.cs = 169
- goto _testEof
- _testEof170:
- m.cs = 170
- goto _testEof
- _testEof171:
- m.cs = 171
- goto _testEof
- _testEof172:
- m.cs = 172
- goto _testEof
- _testEof173:
- m.cs = 173
- goto _testEof
- _testEof174:
- m.cs = 174
- goto _testEof
- _testEof175:
- m.cs = 175
- goto _testEof
- _testEof176:
- m.cs = 176
- goto _testEof
- _testEof177:
- m.cs = 177
- goto _testEof
- _testEof178:
- m.cs = 178
- goto _testEof
- _testEof179:
- m.cs = 179
- goto _testEof
- _testEof180:
- m.cs = 180
- goto _testEof
- _testEof181:
- m.cs = 181
- goto _testEof
- _testEof182:
- m.cs = 182
- goto _testEof
- _testEof183:
- m.cs = 183
- goto _testEof
- _testEof184:
- m.cs = 184
- goto _testEof
- _testEof185:
- m.cs = 185
- goto _testEof
- _testEof186:
- m.cs = 186
- goto _testEof
- _testEof187:
- m.cs = 187
- goto _testEof
- _testEof188:
- m.cs = 188
- goto _testEof
- _testEof189:
- m.cs = 189
- goto _testEof
- _testEof190:
- m.cs = 190
- goto _testEof
- _testEof191:
- m.cs = 191
- goto _testEof
- _testEof192:
- m.cs = 192
- goto _testEof
- _testEof193:
- m.cs = 193
- goto _testEof
- _testEof194:
- m.cs = 194
- goto _testEof
- _testEof195:
- m.cs = 195
- goto _testEof
- _testEof196:
- m.cs = 196
- goto _testEof
- _testEof197:
- m.cs = 197
- goto _testEof
- _testEof198:
- m.cs = 198
- goto _testEof
- _testEof199:
- m.cs = 199
- goto _testEof
- _testEof200:
- m.cs = 200
- goto _testEof
- _testEof201:
- m.cs = 201
- goto _testEof
- _testEof202:
- m.cs = 202
- goto _testEof
- _testEof203:
- m.cs = 203
- goto _testEof
- _testEof204:
- m.cs = 204
- goto _testEof
- _testEof205:
- m.cs = 205
- goto _testEof
- _testEof206:
- m.cs = 206
- goto _testEof
- _testEof207:
- m.cs = 207
- goto _testEof
- _testEof208:
- m.cs = 208
- goto _testEof
- _testEof209:
- m.cs = 209
- goto _testEof
- _testEof210:
- m.cs = 210
- goto _testEof
- _testEof211:
- m.cs = 211
- goto _testEof
- _testEof212:
- m.cs = 212
- goto _testEof
- _testEof213:
- m.cs = 213
- goto _testEof
- _testEof214:
- m.cs = 214
- goto _testEof
- _testEof215:
- m.cs = 215
- goto _testEof
- _testEof216:
- m.cs = 216
- goto _testEof
- _testEof217:
- m.cs = 217
- goto _testEof
- _testEof218:
- m.cs = 218
- goto _testEof
- _testEof219:
- m.cs = 219
- goto _testEof
- _testEof220:
- m.cs = 220
- goto _testEof
- _testEof221:
- m.cs = 221
- goto _testEof
- _testEof222:
- m.cs = 222
- goto _testEof
- _testEof223:
- m.cs = 223
- goto _testEof
- _testEof224:
- m.cs = 224
- goto _testEof
- _testEof225:
- m.cs = 225
- goto _testEof
- _testEof226:
- m.cs = 226
- goto _testEof
- _testEof227:
- m.cs = 227
- goto _testEof
- _testEof228:
- m.cs = 228
- goto _testEof
- _testEof229:
- m.cs = 229
- goto _testEof
- _testEof230:
- m.cs = 230
- goto _testEof
- _testEof231:
- m.cs = 231
- goto _testEof
- _testEof232:
- m.cs = 232
- goto _testEof
- _testEof233:
- m.cs = 233
- goto _testEof
- _testEof234:
- m.cs = 234
- goto _testEof
- _testEof235:
- m.cs = 235
- goto _testEof
- _testEof236:
- m.cs = 236
- goto _testEof
- _testEof237:
- m.cs = 237
- goto _testEof
- _testEof238:
- m.cs = 238
- goto _testEof
- _testEof239:
- m.cs = 239
- goto _testEof
- _testEof240:
- m.cs = 240
- goto _testEof
- _testEof241:
- m.cs = 241
- goto _testEof
- _testEof242:
- m.cs = 242
- goto _testEof
- _testEof243:
- m.cs = 243
- goto _testEof
- _testEof244:
- m.cs = 244
- goto _testEof
- _testEof245:
- m.cs = 245
- goto _testEof
- _testEof246:
- m.cs = 246
- goto _testEof
- _testEof247:
- m.cs = 247
- goto _testEof
- _testEof248:
- m.cs = 248
- goto _testEof
- _testEof249:
- m.cs = 249
- goto _testEof
- _testEof250:
- m.cs = 250
- goto _testEof
- _testEof251:
- m.cs = 251
- goto _testEof
- _testEof252:
- m.cs = 252
- goto _testEof
- _testEof253:
- m.cs = 253
- goto _testEof
- _testEof254:
- m.cs = 254
- goto _testEof
- _testEof255:
- m.cs = 255
- goto _testEof
- _testEof256:
- m.cs = 256
- goto _testEof
- _testEof257:
- m.cs = 257
- goto _testEof
- _testEof258:
- m.cs = 258
- goto _testEof
- _testEof259:
- m.cs = 259
- goto _testEof
- _testEof260:
- m.cs = 260
- goto _testEof
- _testEof261:
- m.cs = 261
- goto _testEof
- _testEof262:
- m.cs = 262
- goto _testEof
- _testEof263:
- m.cs = 263
- goto _testEof
- _testEof264:
- m.cs = 264
- goto _testEof
- _testEof265:
- m.cs = 265
- goto _testEof
- _testEof266:
- m.cs = 266
- goto _testEof
- _testEof267:
- m.cs = 267
- goto _testEof
- _testEof268:
- m.cs = 268
- goto _testEof
- _testEof269:
- m.cs = 269
- goto _testEof
- _testEof270:
- m.cs = 270
- goto _testEof
- _testEof271:
- m.cs = 271
- goto _testEof
- _testEof272:
- m.cs = 272
- goto _testEof
- _testEof273:
- m.cs = 273
- goto _testEof
- _testEof274:
- m.cs = 274
- goto _testEof
- _testEof275:
- m.cs = 275
- goto _testEof
- _testEof276:
- m.cs = 276
- goto _testEof
- _testEof277:
- m.cs = 277
- goto _testEof
- _testEof278:
- m.cs = 278
- goto _testEof
- _testEof279:
- m.cs = 279
- goto _testEof
- _testEof280:
- m.cs = 280
- goto _testEof
- _testEof281:
- m.cs = 281
- goto _testEof
- _testEof282:
- m.cs = 282
- goto _testEof
- _testEof283:
- m.cs = 283
- goto _testEof
- _testEof284:
- m.cs = 284
- goto _testEof
- _testEof285:
- m.cs = 285
- goto _testEof
- _testEof286:
- m.cs = 286
- goto _testEof
- _testEof287:
- m.cs = 287
- goto _testEof
- _testEof288:
- m.cs = 288
- goto _testEof
- _testEof289:
- m.cs = 289
- goto _testEof
- _testEof290:
- m.cs = 290
- goto _testEof
- _testEof291:
- m.cs = 291
- goto _testEof
- _testEof292:
- m.cs = 292
- goto _testEof
- _testEof293:
- m.cs = 293
- goto _testEof
- _testEof294:
- m.cs = 294
- goto _testEof
- _testEof295:
- m.cs = 295
- goto _testEof
- _testEof296:
- m.cs = 296
- goto _testEof
- _testEof297:
- m.cs = 297
- goto _testEof
- _testEof298:
- m.cs = 298
- goto _testEof
- _testEof299:
- m.cs = 299
- goto _testEof
- _testEof300:
- m.cs = 300
- goto _testEof
- _testEof301:
- m.cs = 301
- goto _testEof
- _testEof302:
- m.cs = 302
- goto _testEof
- _testEof303:
- m.cs = 303
- goto _testEof
- _testEof304:
- m.cs = 304
- goto _testEof
- _testEof305:
- m.cs = 305
- goto _testEof
- _testEof306:
- m.cs = 306
- goto _testEof
- _testEof307:
- m.cs = 307
- goto _testEof
- _testEof308:
- m.cs = 308
- goto _testEof
- _testEof309:
- m.cs = 309
- goto _testEof
- _testEof310:
- m.cs = 310
- goto _testEof
- _testEof311:
- m.cs = 311
- goto _testEof
- _testEof312:
- m.cs = 312
- goto _testEof
- _testEof313:
- m.cs = 313
- goto _testEof
- _testEof314:
- m.cs = 314
- goto _testEof
- _testEof315:
- m.cs = 315
- goto _testEof
- _testEof316:
- m.cs = 316
- goto _testEof
- _testEof317:
- m.cs = 317
- goto _testEof
- _testEof318:
- m.cs = 318
- goto _testEof
- _testEof319:
- m.cs = 319
- goto _testEof
- _testEof320:
- m.cs = 320
- goto _testEof
- _testEof321:
- m.cs = 321
- goto _testEof
- _testEof322:
- m.cs = 322
- goto _testEof
- _testEof323:
- m.cs = 323
- goto _testEof
- _testEof324:
- m.cs = 324
- goto _testEof
- _testEof325:
- m.cs = 325
- goto _testEof
- _testEof326:
- m.cs = 326
- goto _testEof
- _testEof327:
- m.cs = 327
- goto _testEof
- _testEof328:
- m.cs = 328
- goto _testEof
- _testEof329:
- m.cs = 329
- goto _testEof
- _testEof330:
- m.cs = 330
- goto _testEof
- _testEof331:
- m.cs = 331
- goto _testEof
- _testEof332:
- m.cs = 332
- goto _testEof
- _testEof373:
- m.cs = 373
- goto _testEof
-
- _testEof:
- {
- }
- if (m.p) == (m.eof) {
- switch m.cs {
- case 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372:
-
- output.message = string(m.text())
-
- case 1:
-
- m.err = fmt.Errorf(errPri, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- case 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299:
-
- m.err = fmt.Errorf(errTimestamp, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- case 318, 319, 320, 321, 322, 323, 325:
-
- m.err = fmt.Errorf(errRFC3339, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- case 20, 21, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280:
-
- m.err = fmt.Errorf(errHostname, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- case 22:
-
- m.err = fmt.Errorf(errTag, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- case 2, 3, 330, 331, 332:
-
- m.err = fmt.Errorf(errPrival, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- m.err = fmt.Errorf(errPri, m.p)
- (m.p)--
-
- {
- goto st373
- }
-
- }
- }
-
- _out:
- {
- }
- }
-
- if m.cs < firstFinal || m.cs == enFail {
- if m.bestEffort && output.minimal() {
- // An error occurred but partial parsing is on and partial message is minimally valid
- return output.export(), m.err
- }
- return nil, m.err
- }
-
- return output.export(), nil
-}
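
The tail of the deleted state machine above is the only hand-readable part: when parsing fails but best-effort mode is on and the partial message is minimally valid, the parser returns that partial result together with the error. A hedged sketch of driving the same behaviour through the public go-syslog v4 API (the WithBestEffort option and the sample message are assumptions carried over from the v3 interface this generated code belonged to, not taken from this diff):

package main

import (
	"fmt"

	"github.com/leodido/go-syslog/v4/rfc3164"
)

func main() {
	// Best-effort mode mirrors the branch above: on a parse error a
	// minimally valid partial message is still returned alongside the error.
	p := rfc3164.NewParser(rfc3164.WithBestEffort())
	msg, err := p.Parse([]byte("<34>Oct 11 22:14:15 mymachine su: 'su root' failed on /dev/pts/8"))
	if err != nil {
		fmt.Println("parse error:", err)
	}
	if msg != nil {
		fmt.Printf("partial or full result: %+v\n", msg)
	}
}

In this mode a non-nil message and a non-nil error can come back at the same time, which is exactly the case guarded by m.bestEffort && output.minimal() above.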
diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go
index 8ee9949111..90d9464918 100644
--- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go
+++ b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go
@@ -24,6 +24,9 @@ import (
// StrategyStore keeps track of service specific sampling strategies.
type StrategyStore interface {
+ // Close() from io.Closer stops the processor from calculating probabilities.
+ io.Closer
+
// GetSamplingStrategy retrieves the sampling strategy for the specified service.
GetSamplingStrategy(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error)
}
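
With io.Closer embedded, every StrategyStore implementation now has to provide Close() error in addition to GetSamplingStrategy. A minimal conforming sketch (the staticStore type is illustrative and the api_v2 import path is assumed from the upstream Jaeger layout; neither appears in this diff):

package strategystore

import (
	"context"

	"github.com/jaegertracing/jaeger/proto-gen/api_v2"
)

// staticStore is a hypothetical store that always returns the same strategy.
type staticStore struct {
	resp *api_v2.SamplingStrategyResponse
}

func (s *staticStore) GetSamplingStrategy(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error) {
	// A real store would look serviceName up; this one is static.
	return s.resp, nil
}

// Close satisfies the newly embedded io.Closer; an adaptive store would stop
// its background probability calculation here.
func (s *staticStore) Close() error { return nil }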
diff --git a/vendor/github.com/jaegertracing/jaeger/storage/factory.go b/vendor/github.com/jaegertracing/jaeger/storage/factory.go
index f6d3394840..b56e6fdc07 100644
--- a/vendor/github.com/jaegertracing/jaeger/storage/factory.go
+++ b/vendor/github.com/jaegertracing/jaeger/storage/factory.go
@@ -23,7 +23,7 @@ import (
"github.com/jaegertracing/jaeger/pkg/distributedlock"
"github.com/jaegertracing/jaeger/pkg/metrics"
"github.com/jaegertracing/jaeger/storage/dependencystore"
- metricsstore "github.com/jaegertracing/jaeger/storage/metricsstore"
+ "github.com/jaegertracing/jaeger/storage/metricsstore"
"github.com/jaegertracing/jaeger/storage/samplingstore"
"github.com/jaegertracing/jaeger/storage/spanstore"
)
@@ -49,6 +49,13 @@ type Factory interface {
CreateDependencyReader() (dependencystore.Reader, error)
}
+// Purger defines an interface that is capable of purging the storage.
+// Only meant to be used from integration tests.
+type Purger interface {
+ // Purge removes all data from the storage.
+ Purge() error
+}
+
// SamplingStoreFactory defines an interface that is capable of returning the necessary backends for
// adaptive sampling.
type SamplingStoreFactory interface {
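
Purger is deliberately kept out of the main Factory interface, so integration tests have to type-assert for it before wiping data. A hedged sketch of that pattern (the cleanStorage helper is illustrative, not taken from the Jaeger test suite):

package integration

import (
	"testing"

	"github.com/jaegertracing/jaeger/storage"
)

// cleanStorage purges all data between test cases, but only when the
// concrete factory actually implements the optional Purger interface.
func cleanStorage(t *testing.T, f storage.Factory) {
	purger, ok := f.(storage.Purger)
	if !ok {
		t.Skip("storage factory does not support purging")
	}
	if err := purger.Purge(); err != nil {
		t.Fatalf("failed to purge storage: %v", err)
	}
}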
diff --git a/vendor/github.com/kardianos/osext/LICENSE b/vendor/github.com/kardianos/osext/LICENSE
new file mode 100644
index 0000000000..7448756763
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/kardianos/osext/README.md b/vendor/github.com/kardianos/osext/README.md
new file mode 100644
index 0000000000..15cbc3d953
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/README.md
@@ -0,0 +1,21 @@
+### Extensions to the "os" package.
+
+[![GoDoc](https://godoc.org/github.com/kardianos/osext?status.svg)](https://godoc.org/github.com/kardianos/osext)
+
+## Find the current Executable and ExecutableFolder.
+
+As of go1.8 the Executable function may be found in `os`. The Executable function
+in the std lib `os` package is used if available.
+
+There is sometimes utility in finding the current executable file
+that is running. This can be used for upgrading the current executable
+or finding resources located relative to the executable file. Both
+working directory and the os.Args[0] value are arbitrary and cannot
+be relied on; os.Args[0] can be "faked".
+
+Multi-platform and supports:
+ * Linux
+ * OS X
+ * Windows
+ * Plan 9
+ * BSDs.
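
The README above boils down to two calls, Executable and ExecutableFolder, which stay reliable where os.Args[0] and the working directory are not. A short usage sketch (the config.yaml name is only an example of a resource shipped next to the binary):

package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/kardianos/osext"
)

func main() {
	// Absolute path of the running binary, resolved without trusting os.Args[0].
	exe, err := osext.Executable()
	if err != nil {
		log.Fatal(err)
	}
	// Directory of the binary, handy for loading resources shipped beside it.
	dir, err := osext.ExecutableFolder()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("running:", exe)
	fmt.Println("config :", filepath.Join(dir, "config.yaml"))
}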
diff --git a/vendor/github.com/kardianos/osext/osext.go b/vendor/github.com/kardianos/osext/osext.go
new file mode 100644
index 0000000000..17f380f0e8
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/osext.go
@@ -0,0 +1,33 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Extensions to the standard "os" package.
+package osext // import "github.com/kardianos/osext"
+
+import "path/filepath"
+
+var cx, ce = executableClean()
+
+func executableClean() (string, error) {
+ p, err := executable()
+ return filepath.Clean(p), err
+}
+
+// Executable returns an absolute path that can be used to
+// re-invoke the current program.
+// It may not be valid after the current program exits.
+func Executable() (string, error) {
+ return cx, ce
+}
+
+// Returns same path as Executable, returns just the folder
+// path. Excludes the executable name and any trailing slash.
+func ExecutableFolder() (string, error) {
+ p, err := Executable()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Dir(p), nil
+}
diff --git a/vendor/github.com/kardianos/osext/osext_go18.go b/vendor/github.com/kardianos/osext/osext_go18.go
new file mode 100644
index 0000000000..009d8a9262
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/osext_go18.go
@@ -0,0 +1,9 @@
+//+build go1.8,!openbsd
+
+package osext
+
+import "os"
+
+func executable() (string, error) {
+ return os.Executable()
+}
diff --git a/vendor/github.com/kardianos/osext/osext_plan9.go b/vendor/github.com/kardianos/osext/osext_plan9.go
new file mode 100644
index 0000000000..95e237137a
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/osext_plan9.go
@@ -0,0 +1,22 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//+build !go1.8
+
+package osext
+
+import (
+ "os"
+ "strconv"
+ "syscall"
+)
+
+func executable() (string, error) {
+ f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ return syscall.Fd2path(int(f.Fd()))
+}
diff --git a/vendor/github.com/kardianos/osext/osext_procfs.go b/vendor/github.com/kardianos/osext/osext_procfs.go
new file mode 100644
index 0000000000..e1f16f8851
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/osext_procfs.go
@@ -0,0 +1,36 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8,android !go1.8,linux !go1.8,netbsd !go1.8,solaris !go1.8,dragonfly
+
+package osext
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+)
+
+func executable() (string, error) {
+ switch runtime.GOOS {
+ case "linux", "android":
+ const deletedTag = " (deleted)"
+ execpath, err := os.Readlink("/proc/self/exe")
+ if err != nil {
+ return execpath, err
+ }
+ execpath = strings.TrimSuffix(execpath, deletedTag)
+ execpath = strings.TrimPrefix(execpath, deletedTag)
+ return execpath, nil
+ case "netbsd":
+ return os.Readlink("/proc/curproc/exe")
+ case "dragonfly":
+ return os.Readlink("/proc/curproc/file")
+ case "solaris":
+ return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
+ }
+ return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
+}
diff --git a/vendor/github.com/kardianos/osext/osext_sysctl.go b/vendor/github.com/kardianos/osext/osext_sysctl.go
new file mode 100644
index 0000000000..33cee2522b
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/osext_sysctl.go
@@ -0,0 +1,126 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8,darwin !go1.8,freebsd openbsd
+
+package osext
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+var initCwd, initCwdErr = os.Getwd()
+
+func executable() (string, error) {
+ var mib [4]int32
+ switch runtime.GOOS {
+ case "freebsd":
+ mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
+ case "darwin":
+ mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
+ case "openbsd":
+ mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */}
+ }
+
+ n := uintptr(0)
+ // Get length.
+ _, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
+ if errNum != 0 {
+ return "", errNum
+ }
+ if n == 0 { // This shouldn't happen.
+ return "", nil
+ }
+ buf := make([]byte, n)
+ _, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
+ if errNum != 0 {
+ return "", errNum
+ }
+ if n == 0 { // This shouldn't happen.
+ return "", nil
+ }
+
+ var execPath string
+ switch runtime.GOOS {
+ case "openbsd":
+ // buf now contains **argv, with pointers to each of the C-style
+ // NULL terminated arguments.
+ var args []string
+ argv := uintptr(unsafe.Pointer(&buf[0]))
+ Loop:
+ for {
+ argp := *(**[1 << 20]byte)(unsafe.Pointer(argv))
+ if argp == nil {
+ break
+ }
+ for i := 0; uintptr(i) < n; i++ {
+ // we don't want the full arguments list
+ if string(argp[i]) == " " {
+ break Loop
+ }
+ if argp[i] != 0 {
+ continue
+ }
+ args = append(args, string(argp[:i]))
+ n -= uintptr(i)
+ break
+ }
+ if n < unsafe.Sizeof(argv) {
+ break
+ }
+ argv += unsafe.Sizeof(argv)
+ n -= unsafe.Sizeof(argv)
+ }
+ execPath = args[0]
+ // There is no canonical way to get an executable path on
+ // OpenBSD, so check PATH in case we are called directly
+ if execPath[0] != '/' && execPath[0] != '.' {
+ execIsInPath, err := exec.LookPath(execPath)
+ if err == nil {
+ execPath = execIsInPath
+ }
+ }
+ default:
+ for i, v := range buf {
+ if v == 0 {
+ buf = buf[:i]
+ break
+ }
+ }
+ execPath = string(buf)
+ }
+
+ var err error
+ // execPath will not be empty due to above checks.
+ // Try to get the absolute path if the execPath is not rooted.
+ if execPath[0] != '/' {
+ execPath, err = getAbs(execPath)
+ if err != nil {
+ return execPath, err
+ }
+ }
+ // For darwin KERN_PROCARGS may return the path to a symlink rather than the
+ // actual executable.
+ if runtime.GOOS == "darwin" {
+ if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
+ return execPath, err
+ }
+ }
+ return execPath, nil
+}
+
+func getAbs(execPath string) (string, error) {
+ if initCwdErr != nil {
+ return execPath, initCwdErr
+ }
+ // The execPath may begin with a "../" or a "./" so clean it first.
+ // Join the two paths, trailing and starting slashes undetermined, so use
+ // the generic Join function.
+ return filepath.Join(initCwd, filepath.Clean(execPath)), nil
+}
diff --git a/vendor/github.com/kardianos/osext/osext_windows.go b/vendor/github.com/kardianos/osext/osext_windows.go
new file mode 100644
index 0000000000..074b3b385c
--- /dev/null
+++ b/vendor/github.com/kardianos/osext/osext_windows.go
@@ -0,0 +1,36 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//+build !go1.8
+
+package osext
+
+import (
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
+)
+
+// GetModuleFileName() with hModule = NULL
+func executable() (exePath string, err error) {
+ return getModuleFileName()
+}
+
+func getModuleFileName() (string, error) {
+ var n uint32
+ b := make([]uint16, syscall.MAX_PATH)
+ size := uint32(len(b))
+
+ r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
+ n = uint32(r0)
+ if n == 0 {
+ return "", e1
+ }
+ return string(utf16.Decode(b[0:n])), nil
+}
diff --git a/vendor/github.com/influxdata/go-syslog/v3/.gitignore b/vendor/github.com/leodido/go-syslog/v4/.gitignore
similarity index 100%
rename from vendor/github.com/influxdata/go-syslog/v3/.gitignore
rename to vendor/github.com/leodido/go-syslog/v4/.gitignore
diff --git a/vendor/github.com/leodido/go-syslog/v4/.goreleaser.yml b/vendor/github.com/leodido/go-syslog/v4/.goreleaser.yml
new file mode 100644
index 0000000000..429775de26
--- /dev/null
+++ b/vendor/github.com/leodido/go-syslog/v4/.goreleaser.yml
@@ -0,0 +1,16 @@
+release:
+ prerelease: auto
+ draft: false
+ name_template: "v{{.Version}}"
+
+before:
+ hooks:
+ - go mod tidy
+
+builds:
+ - skip: true
+
+# Docs at https://goreleaser.com/customization/changelog
+changelog:
+ use: github-native
+ sort: asc
\ No newline at end of file
diff --git a/vendor/github.com/influxdata/go-syslog/v3/LICENSE b/vendor/github.com/leodido/go-syslog/v4/LICENSE
similarity index 96%
rename from vendor/github.com/influxdata/go-syslog/v3/LICENSE
rename to vendor/github.com/leodido/go-syslog/v4/LICENSE
index fae3a682f4..609d84e046 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/LICENSE
+++ b/vendor/github.com/leodido/go-syslog/v4/LICENSE
@@ -1,6 +1,6 @@
The MIT License
-Copyright (c) 2018, InfluxData Inc.
+Copyright (c) 2018, Leonardo Di Donato
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/influxdata/go-syslog/v3/README.md b/vendor/github.com/leodido/go-syslog/v4/README.md
similarity index 74%
rename from vendor/github.com/influxdata/go-syslog/v3/README.md
rename to vendor/github.com/leodido/go-syslog/v4/README.md
index b1a23ec14a..5fdd7e1e69 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/README.md
+++ b/vendor/github.com/leodido/go-syslog/v4/README.md
@@ -6,6 +6,8 @@
_By [@leodido](https://github.com/leodido)_.
+_This is the official continuation of influxdata/go-syslog_.
+
To wrap up, this package provides:
- an [RFC5424-compliant parser and builder](/rfc5424)
@@ -24,12 +26,12 @@ For example:
## Installation
```
-go get github.com/influxdata/go-syslog/v3
+go get github.com/leodido/go-syslog/v4
```
## Docs
-[![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](http://godoc.org/github.com/influxdata/go-syslog)
+[![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](http://godoc.org/github.com/leodido/go-syslog)
The [docs](docs/) directory contains `.dot` files representing the finite-state machines (FSMs) implementing the syslog parsers and transports.
@@ -204,39 +206,37 @@ make bench
On my machine[1](#mymachine) these are the results obtained parsing RFC5424 syslog messages with best effort mode on.
```
-[no]_empty_input__________________________________ 4524100 274 ns/op 272 B/op 4 allocs/op
-[no]_multiple_syslog_messages_on_multiple_lines___ 3039513 361 ns/op 288 B/op 8 allocs/op
-[no]_impossible_timestamp_________________________ 1244562 951 ns/op 512 B/op 11 allocs/op
-[no]_malformed_structured_data____________________ 2389249 512 ns/op 512 B/op 9 allocs/op
-[no]_with_duplicated_structured_data_id___________ 1000000 1183 ns/op 712 B/op 17 allocs/op
-[ok]_minimal______________________________________ 6876235 178 ns/op 227 B/op 5 allocs/op
-[ok]_average_message______________________________ 730473 1653 ns/op 1520 B/op 24 allocs/op
-[ok]_complicated_message__________________________ 908776 1344 ns/op 1264 B/op 24 allocs/op
-[ok]_very_long_message____________________________ 392737 3114 ns/op 2448 B/op 25 allocs/op
-[ok]_all_max_length_and_complete__________________ 510740 2431 ns/op 1872 B/op 28 allocs/op
-[ok]_all_max_length_except_structured_data_and_mes 755124 1593 ns/op 867 B/op 13 allocs/op
-[ok]_minimal_with_message_containing_newline______ 6142984 199 ns/op 230 B/op 6 allocs/op
-[ok]_w/o_procid,_w/o_structured_data,_with_message 1670286 732 ns/op 348 B/op 10 allocs/op
-[ok]_minimal_with_UTF-8_message___________________ 3013480 407 ns/op 339 B/op 6 allocs/op
-[ok]_minimal_with_UTF-8_message_starting_with_BOM_ 2926410 423 ns/op 355 B/op 6 allocs/op
-[ok]_with_structured_data_id,_w/o_structured_data_ 1558971 814 ns/op 570 B/op 11 allocs/op
-[ok]_with_multiple_structured_data________________ 1000000 1243 ns/op 1205 B/op 16 allocs/op
-[ok]_with_escaped_backslash_within_structured_data 1000000 1025 ns/op 896 B/op 17 allocs/op
-[ok]_with_UTF-8_structured_data_param_value,_with_ 1000000 1241 ns/op 1034 B/op 19 allocs/op
+[no]_empty_input__________________________________-10 32072733 185.3 ns/op 272 B/op 4 allocs/op
+[no]_multiple_syslog_messages_on_multiple_lines___-10 27058381 219.8 ns/op 267 B/op 7 allocs/op
+[no]_impossible_timestamp_________________________-10 8732960 683.8 ns/op 555 B/op 12 allocs/op
+[no]_malformed_structured_data____________________-10 17997814 335.6 ns/op 499 B/op 8 allocs/op
+[no]_with_duplicated_structured_data_id___________-10 9254920 645.7 ns/op 672 B/op 15 allocs/op
+[ok]_minimal______________________________________-10 48347473 123.2 ns/op 227 B/op 5 allocs/op
+[ok]_average_message______________________________-10 6058492 986.8 ns/op 1344 B/op 20 allocs/op
+[ok]_complicated_message__________________________-10 7052536 843.2 ns/op 1232 B/op 23 allocs/op
+[ok]_very_long_message____________________________-10 2644068 2279.0 ns/op 2272 B/op 21 allocs/op
+[ok]_all_max_length_and_complete__________________-10 3611186 1675.0 ns/op 1848 B/op 27 allocs/op
+[ok]_all_max_length_except_structured_data_and_mes-10 5729514 1059.0 ns/op 851 B/op 12 allocs/op
+[ok]_minimal_with_message_containing_newline______-10 43165338 142.9 ns/op 230 B/op 6 allocs/op
+[ok]_w/o_procid,_w/o_structured_data,_with_message-10 14832892 397.8 ns/op 308 B/op 9 allocs/op
+[ok]_minimal_with_UTF-8_message___________________-10 20229313 306.2 ns/op 339 B/op 6 allocs/op
+[ok]_minimal_with_UTF-8_message_starting_with_BOM_-10 19721539 306.7 ns/op 355 B/op 6 allocs/op
+[ok]_with_structured_data_id,_w/o_structured_data_-10 13860580 435.7 ns/op 538 B/op 10 allocs/op
+[ok]_with_multiple_structured_data________________-10 8368731 721.9 ns/op 1173 B/op 15 allocs/op
+[ok]_with_escaped_backslash_within_structured_data-10 9730569 632.6 ns/op 864 B/op 16 allocs/op
+[ok]_with_UTF-8_structured_data_param_value,_with_-10 8864156 660.6 ns/op 858 B/op 15 allocs/op
```
As you can see it takes:
-* ~250ns to parse the smallest legal message
+* ~125ns to parse the smallest legal message
-* less than 2µs to parse an average legal message
+* less than 1µs to parse an average legal message
-* ~3µs to parse a very long legal message
+* ~2µs to parse a very long legal message
Other RFC5424 implementations, like this [one](https://github.com/roguelazer/rust-syslog-rfc5424) in Rust, spend 8µs to parse an average legal message.
-_TBD: comparison against other Go parsers_.
-
---
-* [1]: Intel Core i7-8850H CPU @ 2.60GHz
+* [1]: Apple M1 Pro
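
Since the README's install instructions now point at the new module path, a minimal parse sketch against that path may help. It assumes the `rfc5424.NewMachine` constructor exported by the package (the same constructor style the rfc3164 package uses later in this diff); the sample message is illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/leodido/go-syslog/v4/rfc5424"
)

func main() {
	// Parse a single RFC5424 message using the renamed module path.
	input := []byte(`<165>4 2018-10-11T22:14:15.003Z mymach.it e - 1 [ex@32473 iut="3"] An application event log entry...`)
	m := rfc5424.NewMachine()
	msg, err := m.Parse(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", msg)
}
```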
diff --git a/vendor/github.com/influxdata/go-syslog/v3/common/common.rl b/vendor/github.com/leodido/go-syslog/v4/common/common.rl
similarity index 100%
rename from vendor/github.com/influxdata/go-syslog/v3/common/common.rl
rename to vendor/github.com/leodido/go-syslog/v4/common/common.rl
diff --git a/vendor/github.com/influxdata/go-syslog/v3/common/facility.go b/vendor/github.com/leodido/go-syslog/v4/common/facility.go
similarity index 100%
rename from vendor/github.com/influxdata/go-syslog/v3/common/facility.go
rename to vendor/github.com/leodido/go-syslog/v4/common/facility.go
diff --git a/vendor/github.com/influxdata/go-syslog/v3/common/functions.go b/vendor/github.com/leodido/go-syslog/v4/common/functions.go
similarity index 100%
rename from vendor/github.com/influxdata/go-syslog/v3/common/functions.go
rename to vendor/github.com/leodido/go-syslog/v4/common/functions.go
diff --git a/vendor/github.com/influxdata/go-syslog/v3/common/severity.go b/vendor/github.com/leodido/go-syslog/v4/common/severity.go
similarity index 99%
rename from vendor/github.com/influxdata/go-syslog/v3/common/severity.go
rename to vendor/github.com/leodido/go-syslog/v4/common/severity.go
index c8144ce7d6..70ab35adc3 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/common/severity.go
+++ b/vendor/github.com/leodido/go-syslog/v4/common/severity.go
@@ -35,4 +35,4 @@ var SeverityLevelsShort = map[uint8]string{
5: "notice",
6: "info",
7: "debug",
-}
\ No newline at end of file
+}
diff --git a/vendor/github.com/influxdata/go-syslog/v3/makefile b/vendor/github.com/leodido/go-syslog/v4/makefile
similarity index 91%
rename from vendor/github.com/influxdata/go-syslog/v3/makefile
rename to vendor/github.com/leodido/go-syslog/v4/makefile
index cb77f52d90..a04cb3669f 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/makefile
+++ b/vendor/github.com/leodido/go-syslog/v4/makefile
@@ -1,14 +1,13 @@
SHELL := /bin/bash
RAGEL := ragel -I common
+REMOVECOMMENTS := go run github.com/leodido/go-urn/tools/removecomments@master
+SNAKE2CAMEL := go run github.com/leodido/go-urn/tools/snake2camel@master
+GOFMT := go fmt
export GO_TEST=env GOTRACEBACK=all GO111MODULE=on go test $(GO_ARGS)
.PHONY: build
build: rfc5424/machine.go rfc5424/builder.go nontransparent/parser.go rfc3164/machine.go
- @gofmt -w -s ./rfc5424
- @gofmt -w -s ./rfc3164
- @gofmt -w -s ./octetcounting
- @gofmt -w -s ./nontransparent
rfc5424/machine.go: rfc5424/machine.go.rl common/common.rl
@@ -20,26 +19,21 @@ nontransparent/parser.go: nontransparent/parser.go.rl
rfc5424/builder.go rfc5424/machine.go:
$(RAGEL) -Z -G2 -e -o $@ $<
- @sed -i '/^\/\/line/d' $@
- $(MAKE) file=$@ snake2camel
+ $(REMOVECOMMENTS) $@
+ $(SNAKE2CAMEL) $@
+ $(GOFMT) $@
rfc3164/machine.go:
$(RAGEL) -Z -G2 -e -o $@ $<
- @sed -i '/^\/\/line/d' $@
- $(MAKE) file=$@ snake2camel
+ $(REMOVECOMMENTS) $@
+ $(SNAKE2CAMEL) $@
+ $(GOFMT) $@
nontransparent/parser.go:
$(RAGEL) -Z -G2 -e -o $@ $<
- @sed -i '/^\/\/line/d' $@
- $(MAKE) file=$@ snake2camel
-
-.PHONY: snake2camel
-snake2camel:
- @awk -i inplace '{ \
- while ( match($$0, /(.*)([a-z]+[0-9]*)_([a-zA-Z0-9])(.*)/, cap) ) \
- $$0 = cap[1] cap[2] toupper(cap[3]) cap[4]; \
- print \
- }' $(file)
+ $(REMOVECOMMENTS) $@
+ $(SNAKE2CAMEL) $@
+ $(GOFMT) $@
.PHONY: bench
bench: rfc5424/*_test.go rfc5424/machine.go octetcounting/performance_test.go
diff --git a/vendor/github.com/influxdata/go-syslog/v3/nontransparent/parser.go b/vendor/github.com/leodido/go-syslog/v4/nontransparent/parser.go
similarity index 86%
rename from vendor/github.com/influxdata/go-syslog/v3/nontransparent/parser.go
rename to vendor/github.com/leodido/go-syslog/v4/nontransparent/parser.go
index c5872020ab..c4bb37bce6 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/nontransparent/parser.go
+++ b/vendor/github.com/leodido/go-syslog/v4/nontransparent/parser.go
@@ -3,8 +3,9 @@ package nontransparent
import (
"io"
- syslog "github.com/influxdata/go-syslog/v3"
- "github.com/influxdata/go-syslog/v3/rfc5424"
+ syslog "github.com/leodido/go-syslog/v4"
+ "github.com/leodido/go-syslog/v4/rfc3164"
+ "github.com/leodido/go-syslog/v4/rfc5424"
parser "github.com/leodido/ragel-machinery/parser"
)
@@ -28,7 +29,6 @@ type machine struct {
func (m *machine) Exec(s *parser.State) (int, int) {
// Retrieve previously stored parsing variables
cs, p, pe, eof, data := s.Get()
-
{
var _widec int16
if p == pe {
@@ -162,7 +162,6 @@ func (m *machine) Exec(s *parser.State) (int, int) {
{
}
}
-
// Update parsing variables
s.Set(cs, p, pe, eof)
return p, pe
@@ -184,7 +183,8 @@ func (m *machine) OnCompletion() {
// Try to parse last chunk as a candidate
if m.readError != nil && len(m.lastChunk) > 0 {
res, err := m.internal.Parse(m.lastChunk)
- if err == nil {
+ if err == nil && !m.bestEffort {
+ res = nil
err = m.readError
}
m.emit(&syslog.Result{
@@ -218,7 +218,30 @@ func NewParser(options ...syslog.ParserOption) syslog.Parser {
return m
}
-// WithMaxMessageLength does nothing for this parser
+func NewParserRFC3164(options ...syslog.ParserOption) syslog.Parser {
+ m := &machine{
+ emit: func(*syslog.Result) { /* noop */ },
+ }
+
+ for _, opt := range options {
+ m = opt(m).(*machine)
+ }
+
+ // No error can happen since the trailer type is validated when it is set
+ trailer, _ := m.trailertyp.Value()
+ m.trailer = byte(trailer)
+
+ // Create internal parser depending on options
+ if m.bestEffort {
+ m.internal = rfc3164.NewMachine(rfc3164.WithBestEffort())
+ } else {
+ m.internal = rfc3164.NewMachine()
+ }
+
+ return m
+}
+
+// WithMaxMessageLength does nothing for this parser.
func (m *machine) WithMaxMessageLength(length int) {}
// HasBestEffort tells whether the receiving parser has best effort mode on or off.
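
A minimal sketch of the new `NewParserRFC3164` constructor added above, wired up with the `WithBestEffort` and `WithListener` parser options from the package's (unchanged) options.go. The sample message is the classic RFC 3164 example, and the exact `Result` field names are assumed from the package API:

```go
package main

import (
	"fmt"
	"strings"

	syslog "github.com/leodido/go-syslog/v4"
	"github.com/leodido/go-syslog/v4/nontransparent"
)

func main() {
	// Newline-framed stream containing one RFC3164 message.
	r := strings.NewReader("<34>Oct 11 22:14:15 mymachine su: 'su root' failed for lonvick on /dev/pts/8\n")
	p := nontransparent.NewParserRFC3164(
		syslog.WithBestEffort(),
		syslog.WithListener(func(res *syslog.Result) {
			fmt.Printf("message=%+v err=%v\n", res.Message, res.Error)
		}),
	)
	p.Parse(r)
}
```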
diff --git a/vendor/github.com/influxdata/go-syslog/v3/nontransparent/parser.go.rl b/vendor/github.com/leodido/go-syslog/v4/nontransparent/parser.go.rl
similarity index 82%
rename from vendor/github.com/influxdata/go-syslog/v3/nontransparent/parser.go.rl
rename to vendor/github.com/leodido/go-syslog/v4/nontransparent/parser.go.rl
index 1dde434e6d..a110fb5620 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/nontransparent/parser.go.rl
+++ b/vendor/github.com/leodido/go-syslog/v4/nontransparent/parser.go.rl
@@ -4,8 +4,9 @@ import (
"io"
parser "github.com/leodido/ragel-machinery/parser"
- syslog "github.com/influxdata/go-syslog/v3"
- "github.com/influxdata/go-syslog/v3/rfc5424"
+ syslog "github.com/leodido/go-syslog/v4"
+ "github.com/leodido/go-syslog/v4/rfc5424"
+ "github.com/leodido/go-syslog/v4/rfc3164"
)
%%{
@@ -78,7 +79,8 @@ func (m *machine) OnCompletion() {
// Try to parse last chunk as a candidate
if m.readError != nil && len(m.lastChunk) > 0 {
res, err := m.internal.Parse(m.lastChunk)
- if err == nil {
+ if err == nil && !m.bestEffort {
+ res = nil
err = m.readError
}
m.emit(&syslog.Result{
@@ -112,6 +114,32 @@ func NewParser(options ...syslog.ParserOption) syslog.Parser {
return m
}
+func NewParserRFC3164(options ...syslog.ParserOption) syslog.Parser {
+ m := &machine{
+ emit: func(*syslog.Result) { /* noop */ },
+ }
+
+ for _, opt := range options {
+ m = opt(m).(*machine)
+ }
+
+ // No error can happen since the trailer type is validated when it is set
+ trailer, _ := m.trailertyp.Value()
+ m.trailer = byte(trailer)
+
+ // Create internal parser depending on options
+ if m.bestEffort {
+ m.internal = rfc3164.NewMachine(rfc3164.WithBestEffort())
+ } else {
+ m.internal = rfc3164.NewMachine()
+ }
+
+ return m
+}
+
+// WithMaxMessageLength does nothing for this parser.
+func (m *machine) WithMaxMessageLength(length int) {}
+
// HasBestEffort tells whether the receiving parser has best effort mode on or off.
func (m *machine) HasBestEffort() bool {
return m.bestEffort
diff --git a/vendor/github.com/influxdata/go-syslog/v3/nontransparent/trailer_type.go b/vendor/github.com/leodido/go-syslog/v4/nontransparent/trailer_type.go
similarity index 100%
rename from vendor/github.com/influxdata/go-syslog/v3/nontransparent/trailer_type.go
rename to vendor/github.com/leodido/go-syslog/v4/nontransparent/trailer_type.go
diff --git a/vendor/github.com/influxdata/go-syslog/v3/octetcounting/parser.go b/vendor/github.com/leodido/go-syslog/v4/octetcounting/parser.go
similarity index 86%
rename from vendor/github.com/influxdata/go-syslog/v3/octetcounting/parser.go
rename to vendor/github.com/leodido/go-syslog/v4/octetcounting/parser.go
index cbdbaacdd1..5a5d4f8618 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/octetcounting/parser.go
+++ b/vendor/github.com/leodido/go-syslog/v4/octetcounting/parser.go
@@ -4,8 +4,9 @@ import (
"fmt"
"io"
- syslog "github.com/influxdata/go-syslog/v3"
- "github.com/influxdata/go-syslog/v3/rfc5424"
+ syslog "github.com/leodido/go-syslog/v4"
+ "github.com/leodido/go-syslog/v4/rfc5424"
+ "github.com/leodido/go-syslog/v4/rfc3164"
)
// parser is capable to parse the input stream containing syslog messages with octetcounting framing.
@@ -43,6 +44,26 @@ func NewParser(opts ...syslog.ParserOption) syslog.Parser {
return p
}
+func NewParserRFC3164(opts ...syslog.ParserOption) syslog.Parser {
+ p := &parser{
+ emit: func(*syslog.Result) { /* noop */ },
+ maxMessageLength: 1024,
+ }
+
+ for _, opt := range opts {
+ p = opt(p).(*parser)
+ }
+
+ // Create internal parser depending on options
+ if p.bestEffort {
+ p.internal = rfc3164.NewMachine(rfc3164.WithBestEffort())
+ } else {
+ p.internal = rfc3164.NewMachine()
+ }
+
+ return p
+}
+
func (p *parser) WithMaxMessageLength(length int) {
p.maxMessageLength = length
}
@@ -134,7 +155,8 @@ func (p *parser) run() {
// Next we MUST see an EOF otherwise the parsing we'll start again
if tok = p.scan(); tok.typ == EOF {
break
- } else {
+ } else if tok.typ != LF {
+ // but some syslog senders separate octet-counted frames with \n, so ignore it
p.unscan()
}
}
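
And the octet-counting counterpart: the sketch below frames two RFC 3164 messages with a `MSGLEN SP MSG` prefix and separates them with a bare `\n`, which the loop change above now skips instead of treating as part of the next length token (option helpers and `Result` fields assumed as in the previous sketch):

```go
package main

import (
	"fmt"
	"strings"

	syslog "github.com/leodido/go-syslog/v4"
	"github.com/leodido/go-syslog/v4/octetcounting"
)

func main() {
	frame := "<34>Oct 11 22:14:15 mymachine su: 'su root' failed"
	// Two octet-counted frames separated by a newline.
	stream := fmt.Sprintf("%d %s\n%d %s", len(frame), frame, len(frame), frame)

	p := octetcounting.NewParserRFC3164(
		syslog.WithBestEffort(),
		syslog.WithListener(func(res *syslog.Result) {
			fmt.Printf("msg=%+v err=%v\n", res.Message, res.Error)
		}),
	)
	p.Parse(strings.NewReader(stream))
}
```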
diff --git a/vendor/github.com/influxdata/go-syslog/v3/octetcounting/scanner.go b/vendor/github.com/leodido/go-syslog/v4/octetcounting/scanner.go
similarity index 95%
rename from vendor/github.com/influxdata/go-syslog/v3/octetcounting/scanner.go
rename to vendor/github.com/leodido/go-syslog/v4/octetcounting/scanner.go
index 7e5f2bb2d7..7926515f93 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/octetcounting/scanner.go
+++ b/vendor/github.com/leodido/go-syslog/v4/octetcounting/scanner.go
@@ -10,6 +10,9 @@ import (
// eof represents a marker byte for the end of the reader
var eof = byte(0)
+// lf represents the NewLine
+var lf = byte(10)
+
// ws represents the whitespace
var ws = byte(32)
@@ -73,6 +76,12 @@ func (s *Scanner) Scan() (tok Token) {
return Token{
typ: EOF,
}
+ case lf:
+ s.ready = true
+ return Token{
+ typ: LF,
+ lit: []byte{lf},
+ }
case ws:
s.ready = true
return Token{
diff --git a/vendor/github.com/influxdata/go-syslog/v3/octetcounting/tokens.go b/vendor/github.com/leodido/go-syslog/v4/octetcounting/tokens.go
similarity index 87%
rename from vendor/github.com/influxdata/go-syslog/v3/octetcounting/tokens.go
rename to vendor/github.com/leodido/go-syslog/v4/octetcounting/tokens.go
index 18338451aa..2f555b2206 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/octetcounting/tokens.go
+++ b/vendor/github.com/leodido/go-syslog/v4/octetcounting/tokens.go
@@ -17,6 +17,7 @@ type TokenType int
const (
ILLEGAL TokenType = iota
EOF
+ LF
WS
MSGLEN
SYSLOGMSG
@@ -32,9 +33,9 @@ func (t Token) String() string {
}
}
-const tokentypename = "ILLEGALEOFWSMSGLENSYSLOGMSG"
+const tokentypename = "ILLEGALEOFLFWSMSGLENSYSLOGMSG"
-var tokentypeindex = [...]uint8{0, 7, 10, 12, 18, 27}
+var tokentypeindex = [...]uint8{0, 7, 10, 12, 14, 20, 29}
// String outputs the string representation of the receiving TokenType.
func (i TokenType) String() string {
diff --git a/vendor/github.com/influxdata/go-syslog/v3/options.go b/vendor/github.com/leodido/go-syslog/v4/options.go
similarity index 100%
rename from vendor/github.com/influxdata/go-syslog/v3/options.go
rename to vendor/github.com/leodido/go-syslog/v4/options.go
diff --git a/vendor/github.com/leodido/go-syslog/v4/rfc3164/machine.go b/vendor/github.com/leodido/go-syslog/v4/rfc3164/machine.go
new file mode 100644
index 0000000000..e779dac264
--- /dev/null
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc3164/machine.go
@@ -0,0 +1,32210 @@
+package rfc3164
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/leodido/go-syslog/v4"
+ "github.com/leodido/go-syslog/v4/common"
+)
+
+var (
+ errPrival = "expecting a priority value in the range 1-191 or equal to 0 [col %d]"
+ errPri = "expecting a priority value within angle brackets [col %d]"
+ errTimestamp = "expecting a Stamp timestamp [col %d]"
+ errRFC3339 = "expecting a Stamp or a RFC3339 timestamp [col %d]"
+ errHostname = "expecting an hostname (from 1 to max 255 US-ASCII characters) [col %d]"
+ errTag = "expecting an alphanumeric tag (max 32 characters) [col %d]"
+ errContentStart = "expecting a content part starting with a non-alphanumeric character [col %d]"
+ errContent = "expecting a content part composed by visible characters only [col %d]"
+ errParse = "parsing error [col %d]"
+)
+
+const start int = 1
+const firstFinal int = 73
+
+const enFail int = 987
+const enMain int = 1
+
+type machine struct {
+ data []byte
+ cs int
+ p, pe, eof int
+ pb int
+ err error
+ bestEffort bool
+ yyyy int
+ rfc3339 bool
+ loc *time.Location
+ timezone *time.Location
+}
+
+// NewMachine creates a new FSM able to parse RFC3164 syslog messages.
+func NewMachine(options ...syslog.MachineOption) syslog.Machine {
+ m := &machine{}
+
+ for _, opt := range options {
+ opt(m)
+ }
+
+ return m
+}
+
+// WithBestEffort enables best effort mode.
+func (m *machine) WithBestEffort() {
+ m.bestEffort = true
+}
+
+// HasBestEffort tells whether the receiving machine has best effort mode on or off.
+func (m *machine) HasBestEffort() bool {
+ return m.bestEffort
+}
+
+// WithYear sets the year for the Stamp timestamp of the RFC 3164 syslog message.
+func (m *machine) WithYear(o YearOperator) {
+ m.yyyy = YearOperation{o}.Operate()
+}
+
+// WithTimezone sets the time zone for the Stamp timestamp of the RFC 3164 syslog message.
+func (m *machine) WithTimezone(loc *time.Location) {
+ m.loc = loc
+}
+
+// WithLocaleTimezone sets the locale time zone for the Stamp timestamp of the RFC 3164 syslog message.
+func (m *machine) WithLocaleTimezone(loc *time.Location) {
+ m.timezone = loc
+}
+
+// WithRFC3339 enables the ability to ALSO match RFC3339 timestamps.
+//
+// Note that this does not disable the default (and correct) Stamp timestamps.
+func (m *machine) WithRFC3339() {
+ m.rfc3339 = true
+}
+
+// Err returns the error that occurred on the last call to Parse.
+//
+// If the result is nil, then the line was parsed successfully.
+func (m *machine) Err() error {
+ return m.err
+}
+
+func (m *machine) text() []byte {
+ return m.data[m.pb:m.p]
+}
+
+// Parse parses the input byte array as a RFC3164 syslog message.
+func (m *machine) Parse(input []byte) (syslog.Message, error) {
+ m.data = input
+ m.p = 0
+ m.pb = 0
+ m.pe = len(input)
+ m.eof = len(input)
+ m.err = nil
+ output := &syslogMessage{}
+ {
+ m.cs = start
+ }
+ {
+ var _widec int16
+ if (m.p) == (m.pe) {
+ goto _testEof
+ }
+ switch m.cs {
+ case 1:
+ goto stCase1
+ case 0:
+ goto stCase0
+ case 2:
+ goto stCase2
+ case 3:
+ goto stCase3
+ case 4:
+ goto stCase4
+ case 5:
+ goto stCase5
+ case 6:
+ goto stCase6
+ case 7:
+ goto stCase7
+ case 8:
+ goto stCase8
+ case 9:
+ goto stCase9
+ case 10:
+ goto stCase10
+ case 11:
+ goto stCase11
+ case 12:
+ goto stCase12
+ case 13:
+ goto stCase13
+ case 14:
+ goto stCase14
+ case 15:
+ goto stCase15
+ case 16:
+ goto stCase16
+ case 17:
+ goto stCase17
+ case 18:
+ goto stCase18
+ case 19:
+ goto stCase19
+ case 20:
+ goto stCase20
+ case 73:
+ goto stCase73
+ case 74:
+ goto stCase74
+ case 75:
+ goto stCase75
+ case 76:
+ goto stCase76
+ case 77:
+ goto stCase77
+ case 78:
+ goto stCase78
+ case 79:
+ goto stCase79
+ case 80:
+ goto stCase80
+ case 81:
+ goto stCase81
+ case 82:
+ goto stCase82
+ case 83:
+ goto stCase83
+ case 84:
+ goto stCase84
+ case 85:
+ goto stCase85
+ case 86:
+ goto stCase86
+ case 87:
+ goto stCase87
+ case 88:
+ goto stCase88
+ case 89:
+ goto stCase89
+ case 90:
+ goto stCase90
+ case 91:
+ goto stCase91
+ case 92:
+ goto stCase92
+ case 93:
+ goto stCase93
+ case 94:
+ goto stCase94
+ case 95:
+ goto stCase95
+ case 96:
+ goto stCase96
+ case 97:
+ goto stCase97
+ case 98:
+ goto stCase98
+ case 99:
+ goto stCase99
+ case 100:
+ goto stCase100
+ case 101:
+ goto stCase101
+ case 102:
+ goto stCase102
+ case 103:
+ goto stCase103
+ case 104:
+ goto stCase104
+ case 105:
+ goto stCase105
+ case 106:
+ goto stCase106
+ case 107:
+ goto stCase107
+ case 108:
+ goto stCase108
+ case 109:
+ goto stCase109
+ case 110:
+ goto stCase110
+ case 111:
+ goto stCase111
+ case 112:
+ goto stCase112
+ case 113:
+ goto stCase113
+ case 114:
+ goto stCase114
+ case 115:
+ goto stCase115
+ case 116:
+ goto stCase116
+ case 117:
+ goto stCase117
+ case 118:
+ goto stCase118
+ case 119:
+ goto stCase119
+ case 120:
+ goto stCase120
+ case 121:
+ goto stCase121
+ case 122:
+ goto stCase122
+ case 123:
+ goto stCase123
+ case 124:
+ goto stCase124
+ case 125:
+ goto stCase125
+ case 126:
+ goto stCase126
+ case 127:
+ goto stCase127
+ case 128:
+ goto stCase128
+ case 129:
+ goto stCase129
+ case 130:
+ goto stCase130
+ case 131:
+ goto stCase131
+ case 132:
+ goto stCase132
+ case 133:
+ goto stCase133
+ case 134:
+ goto stCase134
+ case 135:
+ goto stCase135
+ case 136:
+ goto stCase136
+ case 137:
+ goto stCase137
+ case 138:
+ goto stCase138
+ case 139:
+ goto stCase139
+ case 140:
+ goto stCase140
+ case 141:
+ goto stCase141
+ case 142:
+ goto stCase142
+ case 143:
+ goto stCase143
+ case 144:
+ goto stCase144
+ case 145:
+ goto stCase145
+ case 146:
+ goto stCase146
+ case 147:
+ goto stCase147
+ case 148:
+ goto stCase148
+ case 149:
+ goto stCase149
+ case 150:
+ goto stCase150
+ case 151:
+ goto stCase151
+ case 152:
+ goto stCase152
+ case 153:
+ goto stCase153
+ case 154:
+ goto stCase154
+ case 155:
+ goto stCase155
+ case 156:
+ goto stCase156
+ case 157:
+ goto stCase157
+ case 158:
+ goto stCase158
+ case 159:
+ goto stCase159
+ case 160:
+ goto stCase160
+ case 161:
+ goto stCase161
+ case 162:
+ goto stCase162
+ case 163:
+ goto stCase163
+ case 164:
+ goto stCase164
+ case 165:
+ goto stCase165
+ case 166:
+ goto stCase166
+ case 167:
+ goto stCase167
+ case 168:
+ goto stCase168
+ case 169:
+ goto stCase169
+ case 170:
+ goto stCase170
+ case 171:
+ goto stCase171
+ case 172:
+ goto stCase172
+ case 173:
+ goto stCase173
+ case 174:
+ goto stCase174
+ case 175:
+ goto stCase175
+ case 176:
+ goto stCase176
+ case 177:
+ goto stCase177
+ case 178:
+ goto stCase178
+ case 179:
+ goto stCase179
+ case 180:
+ goto stCase180
+ case 181:
+ goto stCase181
+ case 182:
+ goto stCase182
+ case 183:
+ goto stCase183
+ case 184:
+ goto stCase184
+ case 185:
+ goto stCase185
+ case 186:
+ goto stCase186
+ case 187:
+ goto stCase187
+ case 188:
+ goto stCase188
+ case 189:
+ goto stCase189
+ case 190:
+ goto stCase190
+ case 191:
+ goto stCase191
+ case 192:
+ goto stCase192
+ case 193:
+ goto stCase193
+ case 194:
+ goto stCase194
+ case 195:
+ goto stCase195
+ case 196:
+ goto stCase196
+ case 197:
+ goto stCase197
+ case 198:
+ goto stCase198
+ case 199:
+ goto stCase199
+ case 200:
+ goto stCase200
+ case 201:
+ goto stCase201
+ case 202:
+ goto stCase202
+ case 203:
+ goto stCase203
+ case 204:
+ goto stCase204
+ case 205:
+ goto stCase205
+ case 206:
+ goto stCase206
+ case 207:
+ goto stCase207
+ case 208:
+ goto stCase208
+ case 209:
+ goto stCase209
+ case 210:
+ goto stCase210
+ case 211:
+ goto stCase211
+ case 212:
+ goto stCase212
+ case 213:
+ goto stCase213
+ case 214:
+ goto stCase214
+ case 215:
+ goto stCase215
+ case 216:
+ goto stCase216
+ case 217:
+ goto stCase217
+ case 218:
+ goto stCase218
+ case 219:
+ goto stCase219
+ case 220:
+ goto stCase220
+ case 221:
+ goto stCase221
+ case 222:
+ goto stCase222
+ case 223:
+ goto stCase223
+ case 224:
+ goto stCase224
+ case 225:
+ goto stCase225
+ case 226:
+ goto stCase226
+ case 227:
+ goto stCase227
+ case 228:
+ goto stCase228
+ case 229:
+ goto stCase229
+ case 230:
+ goto stCase230
+ case 231:
+ goto stCase231
+ case 232:
+ goto stCase232
+ case 233:
+ goto stCase233
+ case 234:
+ goto stCase234
+ case 235:
+ goto stCase235
+ case 236:
+ goto stCase236
+ case 237:
+ goto stCase237
+ case 238:
+ goto stCase238
+ case 239:
+ goto stCase239
+ case 240:
+ goto stCase240
+ case 241:
+ goto stCase241
+ case 242:
+ goto stCase242
+ case 243:
+ goto stCase243
+ case 244:
+ goto stCase244
+ case 245:
+ goto stCase245
+ case 246:
+ goto stCase246
+ case 247:
+ goto stCase247
+ case 248:
+ goto stCase248
+ case 249:
+ goto stCase249
+ case 250:
+ goto stCase250
+ case 251:
+ goto stCase251
+ case 252:
+ goto stCase252
+ case 253:
+ goto stCase253
+ case 254:
+ goto stCase254
+ case 255:
+ goto stCase255
+ case 256:
+ goto stCase256
+ case 257:
+ goto stCase257
+ case 258:
+ goto stCase258
+ case 259:
+ goto stCase259
+ case 260:
+ goto stCase260
+ case 261:
+ goto stCase261
+ case 262:
+ goto stCase262
+ case 263:
+ goto stCase263
+ case 264:
+ goto stCase264
+ case 265:
+ goto stCase265
+ case 266:
+ goto stCase266
+ case 267:
+ goto stCase267
+ case 268:
+ goto stCase268
+ case 269:
+ goto stCase269
+ case 270:
+ goto stCase270
+ case 271:
+ goto stCase271
+ case 272:
+ goto stCase272
+ case 273:
+ goto stCase273
+ case 274:
+ goto stCase274
+ case 275:
+ goto stCase275
+ case 276:
+ goto stCase276
+ case 277:
+ goto stCase277
+ case 278:
+ goto stCase278
+ case 279:
+ goto stCase279
+ case 280:
+ goto stCase280
+ case 281:
+ goto stCase281
+ case 282:
+ goto stCase282
+ case 283:
+ goto stCase283
+ case 284:
+ goto stCase284
+ case 285:
+ goto stCase285
+ case 286:
+ goto stCase286
+ case 287:
+ goto stCase287
+ case 288:
+ goto stCase288
+ case 289:
+ goto stCase289
+ case 290:
+ goto stCase290
+ case 291:
+ goto stCase291
+ case 292:
+ goto stCase292
+ case 293:
+ goto stCase293
+ case 294:
+ goto stCase294
+ case 295:
+ goto stCase295
+ case 296:
+ goto stCase296
+ case 297:
+ goto stCase297
+ case 298:
+ goto stCase298
+ case 299:
+ goto stCase299
+ case 300:
+ goto stCase300
+ case 301:
+ goto stCase301
+ case 302:
+ goto stCase302
+ case 303:
+ goto stCase303
+ case 304:
+ goto stCase304
+ case 305:
+ goto stCase305
+ case 306:
+ goto stCase306
+ case 307:
+ goto stCase307
+ case 308:
+ goto stCase308
+ case 309:
+ goto stCase309
+ case 310:
+ goto stCase310
+ case 311:
+ goto stCase311
+ case 312:
+ goto stCase312
+ case 313:
+ goto stCase313
+ case 314:
+ goto stCase314
+ case 315:
+ goto stCase315
+ case 316:
+ goto stCase316
+ case 317:
+ goto stCase317
+ case 318:
+ goto stCase318
+ case 319:
+ goto stCase319
+ case 320:
+ goto stCase320
+ case 321:
+ goto stCase321
+ case 322:
+ goto stCase322
+ case 323:
+ goto stCase323
+ case 324:
+ goto stCase324
+ case 325:
+ goto stCase325
+ case 326:
+ goto stCase326
+ case 327:
+ goto stCase327
+ case 328:
+ goto stCase328
+ case 329:
+ goto stCase329
+ case 330:
+ goto stCase330
+ case 331:
+ goto stCase331
+ case 332:
+ goto stCase332
+ case 333:
+ goto stCase333
+ case 334:
+ goto stCase334
+ case 335:
+ goto stCase335
+ case 336:
+ goto stCase336
+ case 337:
+ goto stCase337
+ case 338:
+ goto stCase338
+ case 339:
+ goto stCase339
+ case 340:
+ goto stCase340
+ case 341:
+ goto stCase341
+ case 342:
+ goto stCase342
+ case 343:
+ goto stCase343
+ case 344:
+ goto stCase344
+ case 345:
+ goto stCase345
+ case 346:
+ goto stCase346
+ case 347:
+ goto stCase347
+ case 348:
+ goto stCase348
+ case 349:
+ goto stCase349
+ case 350:
+ goto stCase350
+ case 351:
+ goto stCase351
+ case 352:
+ goto stCase352
+ case 353:
+ goto stCase353
+ case 354:
+ goto stCase354
+ case 355:
+ goto stCase355
+ case 356:
+ goto stCase356
+ case 357:
+ goto stCase357
+ case 358:
+ goto stCase358
+ case 359:
+ goto stCase359
+ case 360:
+ goto stCase360
+ case 361:
+ goto stCase361
+ case 362:
+ goto stCase362
+ case 363:
+ goto stCase363
+ case 364:
+ goto stCase364
+ case 365:
+ goto stCase365
+ case 366:
+ goto stCase366
+ case 367:
+ goto stCase367
+ case 368:
+ goto stCase368
+ case 369:
+ goto stCase369
+ case 370:
+ goto stCase370
+ case 371:
+ goto stCase371
+ case 372:
+ goto stCase372
+ case 373:
+ goto stCase373
+ case 374:
+ goto stCase374
+ case 375:
+ goto stCase375
+ case 376:
+ goto stCase376
+ case 377:
+ goto stCase377
+ case 378:
+ goto stCase378
+ case 379:
+ goto stCase379
+ case 380:
+ goto stCase380
+ case 381:
+ goto stCase381
+ case 382:
+ goto stCase382
+ case 383:
+ goto stCase383
+ case 384:
+ goto stCase384
+ case 385:
+ goto stCase385
+ case 386:
+ goto stCase386
+ case 387:
+ goto stCase387
+ case 388:
+ goto stCase388
+ case 389:
+ goto stCase389
+ case 390:
+ goto stCase390
+ case 391:
+ goto stCase391
+ case 392:
+ goto stCase392
+ case 393:
+ goto stCase393
+ case 394:
+ goto stCase394
+ case 395:
+ goto stCase395
+ case 396:
+ goto stCase396
+ case 397:
+ goto stCase397
+ case 398:
+ goto stCase398
+ case 399:
+ goto stCase399
+ case 400:
+ goto stCase400
+ case 401:
+ goto stCase401
+ case 402:
+ goto stCase402
+ case 403:
+ goto stCase403
+ case 404:
+ goto stCase404
+ case 405:
+ goto stCase405
+ case 406:
+ goto stCase406
+ case 407:
+ goto stCase407
+ case 408:
+ goto stCase408
+ case 409:
+ goto stCase409
+ case 410:
+ goto stCase410
+ case 411:
+ goto stCase411
+ case 412:
+ goto stCase412
+ case 413:
+ goto stCase413
+ case 414:
+ goto stCase414
+ case 415:
+ goto stCase415
+ case 416:
+ goto stCase416
+ case 417:
+ goto stCase417
+ case 418:
+ goto stCase418
+ case 419:
+ goto stCase419
+ case 420:
+ goto stCase420
+ case 421:
+ goto stCase421
+ case 422:
+ goto stCase422
+ case 423:
+ goto stCase423
+ case 424:
+ goto stCase424
+ case 425:
+ goto stCase425
+ case 426:
+ goto stCase426
+ case 427:
+ goto stCase427
+ case 428:
+ goto stCase428
+ case 429:
+ goto stCase429
+ case 430:
+ goto stCase430
+ case 431:
+ goto stCase431
+ case 432:
+ goto stCase432
+ case 433:
+ goto stCase433
+ case 434:
+ goto stCase434
+ case 435:
+ goto stCase435
+ case 436:
+ goto stCase436
+ case 437:
+ goto stCase437
+ case 438:
+ goto stCase438
+ case 439:
+ goto stCase439
+ case 440:
+ goto stCase440
+ case 441:
+ goto stCase441
+ case 442:
+ goto stCase442
+ case 443:
+ goto stCase443
+ case 444:
+ goto stCase444
+ case 445:
+ goto stCase445
+ case 446:
+ goto stCase446
+ case 447:
+ goto stCase447
+ case 448:
+ goto stCase448
+ case 449:
+ goto stCase449
+ case 450:
+ goto stCase450
+ case 451:
+ goto stCase451
+ case 452:
+ goto stCase452
+ case 453:
+ goto stCase453
+ case 454:
+ goto stCase454
+ case 455:
+ goto stCase455
+ case 456:
+ goto stCase456
+ case 457:
+ goto stCase457
+ case 458:
+ goto stCase458
+ case 459:
+ goto stCase459
+ case 460:
+ goto stCase460
+ case 461:
+ goto stCase461
+ case 462:
+ goto stCase462
+ case 463:
+ goto stCase463
+ case 464:
+ goto stCase464
+ case 465:
+ goto stCase465
+ case 466:
+ goto stCase466
+ case 467:
+ goto stCase467
+ case 468:
+ goto stCase468
+ case 469:
+ goto stCase469
+ case 470:
+ goto stCase470
+ case 471:
+ goto stCase471
+ case 472:
+ goto stCase472
+ case 473:
+ goto stCase473
+ case 474:
+ goto stCase474
+ case 475:
+ goto stCase475
+ case 476:
+ goto stCase476
+ case 477:
+ goto stCase477
+ case 478:
+ goto stCase478
+ case 479:
+ goto stCase479
+ case 480:
+ goto stCase480
+ case 481:
+ goto stCase481
+ case 482:
+ goto stCase482
+ case 483:
+ goto stCase483
+ case 484:
+ goto stCase484
+ case 485:
+ goto stCase485
+ case 486:
+ goto stCase486
+ case 487:
+ goto stCase487
+ case 488:
+ goto stCase488
+ case 489:
+ goto stCase489
+ case 490:
+ goto stCase490
+ case 491:
+ goto stCase491
+ case 492:
+ goto stCase492
+ case 493:
+ goto stCase493
+ case 494:
+ goto stCase494
+ case 495:
+ goto stCase495
+ case 496:
+ goto stCase496
+ case 497:
+ goto stCase497
+ case 498:
+ goto stCase498
+ case 499:
+ goto stCase499
+ case 500:
+ goto stCase500
+ case 501:
+ goto stCase501
+ case 502:
+ goto stCase502
+ case 503:
+ goto stCase503
+ case 504:
+ goto stCase504
+ case 505:
+ goto stCase505
+ case 506:
+ goto stCase506
+ case 507:
+ goto stCase507
+ case 508:
+ goto stCase508
+ case 509:
+ goto stCase509
+ case 510:
+ goto stCase510
+ case 511:
+ goto stCase511
+ case 512:
+ goto stCase512
+ case 513:
+ goto stCase513
+ case 514:
+ goto stCase514
+ case 515:
+ goto stCase515
+ case 516:
+ goto stCase516
+ case 517:
+ goto stCase517
+ case 518:
+ goto stCase518
+ case 519:
+ goto stCase519
+ case 520:
+ goto stCase520
+ case 521:
+ goto stCase521
+ case 522:
+ goto stCase522
+ case 523:
+ goto stCase523
+ case 524:
+ goto stCase524
+ case 525:
+ goto stCase525
+ case 526:
+ goto stCase526
+ case 527:
+ goto stCase527
+ case 528:
+ goto stCase528
+ case 529:
+ goto stCase529
+ case 530:
+ goto stCase530
+ case 531:
+ goto stCase531
+ case 532:
+ goto stCase532
+ case 533:
+ goto stCase533
+ case 534:
+ goto stCase534
+ case 535:
+ goto stCase535
+ case 536:
+ goto stCase536
+ case 537:
+ goto stCase537
+ case 538:
+ goto stCase538
+ case 539:
+ goto stCase539
+ case 540:
+ goto stCase540
+ case 541:
+ goto stCase541
+ case 542:
+ goto stCase542
+ case 543:
+ goto stCase543
+ case 544:
+ goto stCase544
+ case 545:
+ goto stCase545
+ case 546:
+ goto stCase546
+ case 547:
+ goto stCase547
+ case 548:
+ goto stCase548
+ case 549:
+ goto stCase549
+ case 550:
+ goto stCase550
+ case 551:
+ goto stCase551
+ case 552:
+ goto stCase552
+ case 553:
+ goto stCase553
+ case 554:
+ goto stCase554
+ case 555:
+ goto stCase555
+ case 556:
+ goto stCase556
+ case 557:
+ goto stCase557
+ case 558:
+ goto stCase558
+ case 559:
+ goto stCase559
+ case 560:
+ goto stCase560
+ case 561:
+ goto stCase561
+ case 562:
+ goto stCase562
+ case 563:
+ goto stCase563
+ case 564:
+ goto stCase564
+ case 565:
+ goto stCase565
+ case 566:
+ goto stCase566
+ case 567:
+ goto stCase567
+ case 568:
+ goto stCase568
+ case 569:
+ goto stCase569
+ case 570:
+ goto stCase570
+ case 571:
+ goto stCase571
+ case 572:
+ goto stCase572
+ case 573:
+ goto stCase573
+ case 574:
+ goto stCase574
+ case 575:
+ goto stCase575
+ case 576:
+ goto stCase576
+ case 577:
+ goto stCase577
+ case 578:
+ goto stCase578
+ case 579:
+ goto stCase579
+ case 580:
+ goto stCase580
+ case 581:
+ goto stCase581
+ case 582:
+ goto stCase582
+ case 583:
+ goto stCase583
+ case 584:
+ goto stCase584
+ case 585:
+ goto stCase585
+ case 586:
+ goto stCase586
+ case 587:
+ goto stCase587
+ case 588:
+ goto stCase588
+ case 589:
+ goto stCase589
+ case 590:
+ goto stCase590
+ case 591:
+ goto stCase591
+ case 592:
+ goto stCase592
+ case 593:
+ goto stCase593
+ case 594:
+ goto stCase594
+ case 595:
+ goto stCase595
+ case 596:
+ goto stCase596
+ case 597:
+ goto stCase597
+ case 598:
+ goto stCase598
+ case 599:
+ goto stCase599
+ case 600:
+ goto stCase600
+ case 601:
+ goto stCase601
+ case 602:
+ goto stCase602
+ case 603:
+ goto stCase603
+ case 604:
+ goto stCase604
+ case 605:
+ goto stCase605
+ case 606:
+ goto stCase606
+ case 607:
+ goto stCase607
+ case 608:
+ goto stCase608
+ case 609:
+ goto stCase609
+ case 610:
+ goto stCase610
+ case 611:
+ goto stCase611
+ case 612:
+ goto stCase612
+ case 613:
+ goto stCase613
+ case 614:
+ goto stCase614
+ case 615:
+ goto stCase615
+ case 616:
+ goto stCase616
+ case 617:
+ goto stCase617
+ case 618:
+ goto stCase618
+ case 619:
+ goto stCase619
+ case 620:
+ goto stCase620
+ case 621:
+ goto stCase621
+ case 622:
+ goto stCase622
+ case 623:
+ goto stCase623
+ case 624:
+ goto stCase624
+ case 625:
+ goto stCase625
+ case 626:
+ goto stCase626
+ case 627:
+ goto stCase627
+ case 628:
+ goto stCase628
+ case 629:
+ goto stCase629
+ case 630:
+ goto stCase630
+ case 631:
+ goto stCase631
+ case 632:
+ goto stCase632
+ case 633:
+ goto stCase633
+ case 634:
+ goto stCase634
+ case 635:
+ goto stCase635
+ case 636:
+ goto stCase636
+ case 637:
+ goto stCase637
+ case 638:
+ goto stCase638
+ case 639:
+ goto stCase639
+ case 640:
+ goto stCase640
+ case 641:
+ goto stCase641
+ case 642:
+ goto stCase642
+ case 643:
+ goto stCase643
+ case 644:
+ goto stCase644
+ case 645:
+ goto stCase645
+ case 646:
+ goto stCase646
+ case 647:
+ goto stCase647
+ case 648:
+ goto stCase648
+ case 649:
+ goto stCase649
+ case 650:
+ goto stCase650
+ case 651:
+ goto stCase651
+ case 652:
+ goto stCase652
+ case 653:
+ goto stCase653
+ case 654:
+ goto stCase654
+ case 655:
+ goto stCase655
+ case 656:
+ goto stCase656
+ case 657:
+ goto stCase657
+ case 658:
+ goto stCase658
+ case 659:
+ goto stCase659
+ case 660:
+ goto stCase660
+ case 661:
+ goto stCase661
+ case 662:
+ goto stCase662
+ case 663:
+ goto stCase663
+ case 664:
+ goto stCase664
+ case 665:
+ goto stCase665
+ case 666:
+ goto stCase666
+ case 667:
+ goto stCase667
+ case 668:
+ goto stCase668
+ case 669:
+ goto stCase669
+ case 670:
+ goto stCase670
+ case 671:
+ goto stCase671
+ case 672:
+ goto stCase672
+ case 673:
+ goto stCase673
+ case 674:
+ goto stCase674
+ case 675:
+ goto stCase675
+ case 676:
+ goto stCase676
+ case 677:
+ goto stCase677
+ case 678:
+ goto stCase678
+ case 679:
+ goto stCase679
+ case 680:
+ goto stCase680
+ case 681:
+ goto stCase681
+ case 682:
+ goto stCase682
+ case 683:
+ goto stCase683
+ case 684:
+ goto stCase684
+ case 685:
+ goto stCase685
+ case 686:
+ goto stCase686
+ case 687:
+ goto stCase687
+ case 688:
+ goto stCase688
+ case 689:
+ goto stCase689
+ case 690:
+ goto stCase690
+ case 691:
+ goto stCase691
+ case 692:
+ goto stCase692
+ case 693:
+ goto stCase693
+ case 694:
+ goto stCase694
+ case 695:
+ goto stCase695
+ case 696:
+ goto stCase696
+ case 697:
+ goto stCase697
+ case 698:
+ goto stCase698
+ case 699:
+ goto stCase699
+ case 700:
+ goto stCase700
+ case 701:
+ goto stCase701
+ case 702:
+ goto stCase702
+ case 703:
+ goto stCase703
+ case 704:
+ goto stCase704
+ case 705:
+ goto stCase705
+ case 706:
+ goto stCase706
+ case 707:
+ goto stCase707
+ case 708:
+ goto stCase708
+ case 709:
+ goto stCase709
+ case 710:
+ goto stCase710
+ case 711:
+ goto stCase711
+ case 712:
+ goto stCase712
+ case 713:
+ goto stCase713
+ case 714:
+ goto stCase714
+ case 715:
+ goto stCase715
+ case 716:
+ goto stCase716
+ case 717:
+ goto stCase717
+ case 718:
+ goto stCase718
+ case 719:
+ goto stCase719
+ case 720:
+ goto stCase720
+ case 721:
+ goto stCase721
+ case 722:
+ goto stCase722
+ case 723:
+ goto stCase723
+ case 724:
+ goto stCase724
+ case 725:
+ goto stCase725
+ case 726:
+ goto stCase726
+ case 727:
+ goto stCase727
+ case 728:
+ goto stCase728
+ case 729:
+ goto stCase729
+ case 730:
+ goto stCase730
+ case 731:
+ goto stCase731
+ case 732:
+ goto stCase732
+ case 733:
+ goto stCase733
+ case 734:
+ goto stCase734
+ case 735:
+ goto stCase735
+ case 736:
+ goto stCase736
+ case 737:
+ goto stCase737
+ case 738:
+ goto stCase738
+ case 739:
+ goto stCase739
+ case 740:
+ goto stCase740
+ case 741:
+ goto stCase741
+ case 742:
+ goto stCase742
+ case 743:
+ goto stCase743
+ case 744:
+ goto stCase744
+ case 745:
+ goto stCase745
+ case 746:
+ goto stCase746
+ case 747:
+ goto stCase747
+ case 748:
+ goto stCase748
+ case 749:
+ goto stCase749
+ case 750:
+ goto stCase750
+ case 751:
+ goto stCase751
+ case 752:
+ goto stCase752
+ case 753:
+ goto stCase753
+ case 754:
+ goto stCase754
+ case 755:
+ goto stCase755
+ case 756:
+ goto stCase756
+ case 757:
+ goto stCase757
+ case 758:
+ goto stCase758
+ case 759:
+ goto stCase759
+ case 760:
+ goto stCase760
+ case 761:
+ goto stCase761
+ case 762:
+ goto stCase762
+ case 763:
+ goto stCase763
+ case 764:
+ goto stCase764
+ case 765:
+ goto stCase765
+ case 766:
+ goto stCase766
+ case 767:
+ goto stCase767
+ case 768:
+ goto stCase768
+ case 769:
+ goto stCase769
+ case 770:
+ goto stCase770
+ case 771:
+ goto stCase771
+ case 772:
+ goto stCase772
+ case 773:
+ goto stCase773
+ case 774:
+ goto stCase774
+ case 775:
+ goto stCase775
+ case 776:
+ goto stCase776
+ case 777:
+ goto stCase777
+ case 778:
+ goto stCase778
+ case 779:
+ goto stCase779
+ case 780:
+ goto stCase780
+ case 781:
+ goto stCase781
+ case 782:
+ goto stCase782
+ case 783:
+ goto stCase783
+ case 784:
+ goto stCase784
+ case 785:
+ goto stCase785
+ case 786:
+ goto stCase786
+ case 787:
+ goto stCase787
+ case 788:
+ goto stCase788
+ case 789:
+ goto stCase789
+ case 790:
+ goto stCase790
+ case 791:
+ goto stCase791
+ case 792:
+ goto stCase792
+ case 793:
+ goto stCase793
+ case 794:
+ goto stCase794
+ case 795:
+ goto stCase795
+ case 796:
+ goto stCase796
+ case 797:
+ goto stCase797
+ case 798:
+ goto stCase798
+ case 799:
+ goto stCase799
+ case 800:
+ goto stCase800
+ case 801:
+ goto stCase801
+ case 802:
+ goto stCase802
+ case 803:
+ goto stCase803
+ case 804:
+ goto stCase804
+ case 805:
+ goto stCase805
+ case 806:
+ goto stCase806
+ case 807:
+ goto stCase807
+ case 808:
+ goto stCase808
+ case 809:
+ goto stCase809
+ case 810:
+ goto stCase810
+ case 811:
+ goto stCase811
+ case 812:
+ goto stCase812
+ case 813:
+ goto stCase813
+ case 814:
+ goto stCase814
+ case 815:
+ goto stCase815
+ case 816:
+ goto stCase816
+ case 817:
+ goto stCase817
+ case 818:
+ goto stCase818
+ case 819:
+ goto stCase819
+ case 820:
+ goto stCase820
+ case 821:
+ goto stCase821
+ case 822:
+ goto stCase822
+ case 823:
+ goto stCase823
+ case 824:
+ goto stCase824
+ case 825:
+ goto stCase825
+ case 826:
+ goto stCase826
+ case 827:
+ goto stCase827
+ case 828:
+ goto stCase828
+ case 829:
+ goto stCase829
+ case 830:
+ goto stCase830
+ case 831:
+ goto stCase831
+ case 832:
+ goto stCase832
+ case 833:
+ goto stCase833
+ case 834:
+ goto stCase834
+ case 835:
+ goto stCase835
+ case 836:
+ goto stCase836
+ case 837:
+ goto stCase837
+ case 838:
+ goto stCase838
+ case 839:
+ goto stCase839
+ case 840:
+ goto stCase840
+ case 841:
+ goto stCase841
+ case 842:
+ goto stCase842
+ case 843:
+ goto stCase843
+ case 844:
+ goto stCase844
+ case 845:
+ goto stCase845
+ case 846:
+ goto stCase846
+ case 847:
+ goto stCase847
+ case 848:
+ goto stCase848
+ case 849:
+ goto stCase849
+ case 850:
+ goto stCase850
+ case 851:
+ goto stCase851
+ case 852:
+ goto stCase852
+ case 853:
+ goto stCase853
+ case 854:
+ goto stCase854
+ case 855:
+ goto stCase855
+ case 856:
+ goto stCase856
+ case 857:
+ goto stCase857
+ case 858:
+ goto stCase858
+ case 859:
+ goto stCase859
+ case 860:
+ goto stCase860
+ case 861:
+ goto stCase861
+ case 862:
+ goto stCase862
+ case 863:
+ goto stCase863
+ case 864:
+ goto stCase864
+ case 865:
+ goto stCase865
+ case 866:
+ goto stCase866
+ case 867:
+ goto stCase867
+ case 868:
+ goto stCase868
+ case 869:
+ goto stCase869
+ case 870:
+ goto stCase870
+ case 871:
+ goto stCase871
+ case 872:
+ goto stCase872
+ case 873:
+ goto stCase873
+ case 874:
+ goto stCase874
+ case 875:
+ goto stCase875
+ case 876:
+ goto stCase876
+ case 877:
+ goto stCase877
+ case 878:
+ goto stCase878
+ case 879:
+ goto stCase879
+ case 880:
+ goto stCase880
+ case 881:
+ goto stCase881
+ case 882:
+ goto stCase882
+ case 883:
+ goto stCase883
+ case 884:
+ goto stCase884
+ case 885:
+ goto stCase885
+ case 886:
+ goto stCase886
+ case 887:
+ goto stCase887
+ case 888:
+ goto stCase888
+ case 889:
+ goto stCase889
+ case 890:
+ goto stCase890
+ case 891:
+ goto stCase891
+ case 892:
+ goto stCase892
+ case 893:
+ goto stCase893
+ case 894:
+ goto stCase894
+ case 895:
+ goto stCase895
+ case 896:
+ goto stCase896
+ case 897:
+ goto stCase897
+ case 898:
+ goto stCase898
+ case 899:
+ goto stCase899
+ case 900:
+ goto stCase900
+ case 901:
+ goto stCase901
+ case 902:
+ goto stCase902
+ case 903:
+ goto stCase903
+ case 904:
+ goto stCase904
+ case 905:
+ goto stCase905
+ case 906:
+ goto stCase906
+ case 907:
+ goto stCase907
+ case 908:
+ goto stCase908
+ case 909:
+ goto stCase909
+ case 910:
+ goto stCase910
+ case 911:
+ goto stCase911
+ case 912:
+ goto stCase912
+ case 913:
+ goto stCase913
+ case 914:
+ goto stCase914
+ case 915:
+ goto stCase915
+ case 916:
+ goto stCase916
+ case 917:
+ goto stCase917
+ case 918:
+ goto stCase918
+ case 919:
+ goto stCase919
+ case 920:
+ goto stCase920
+ case 921:
+ goto stCase921
+ case 922:
+ goto stCase922
+ case 923:
+ goto stCase923
+ case 924:
+ goto stCase924
+ case 925:
+ goto stCase925
+ case 926:
+ goto stCase926
+ case 927:
+ goto stCase927
+ case 928:
+ goto stCase928
+ case 929:
+ goto stCase929
+ case 930:
+ goto stCase930
+ case 931:
+ goto stCase931
+ case 932:
+ goto stCase932
+ case 933:
+ goto stCase933
+ case 934:
+ goto stCase934
+ case 935:
+ goto stCase935
+ case 936:
+ goto stCase936
+ case 937:
+ goto stCase937
+ case 938:
+ goto stCase938
+ case 939:
+ goto stCase939
+ case 940:
+ goto stCase940
+ case 941:
+ goto stCase941
+ case 942:
+ goto stCase942
+ case 943:
+ goto stCase943
+ case 944:
+ goto stCase944
+ case 945:
+ goto stCase945
+ case 946:
+ goto stCase946
+ case 947:
+ goto stCase947
+ case 948:
+ goto stCase948
+ case 949:
+ goto stCase949
+ case 950:
+ goto stCase950
+ case 951:
+ goto stCase951
+ case 952:
+ goto stCase952
+ case 953:
+ goto stCase953
+ case 954:
+ goto stCase954
+ case 955:
+ goto stCase955
+ case 956:
+ goto stCase956
+ case 957:
+ goto stCase957
+ case 958:
+ goto stCase958
+ case 959:
+ goto stCase959
+ case 960:
+ goto stCase960
+ case 961:
+ goto stCase961
+ case 962:
+ goto stCase962
+ case 963:
+ goto stCase963
+ case 964:
+ goto stCase964
+ case 965:
+ goto stCase965
+ case 966:
+ goto stCase966
+ case 967:
+ goto stCase967
+ case 968:
+ goto stCase968
+ case 969:
+ goto stCase969
+ case 970:
+ goto stCase970
+ case 971:
+ goto stCase971
+ case 972:
+ goto stCase972
+ case 973:
+ goto stCase973
+ case 974:
+ goto stCase974
+ case 975:
+ goto stCase975
+ case 976:
+ goto stCase976
+ case 977:
+ goto stCase977
+ case 978:
+ goto stCase978
+ case 979:
+ goto stCase979
+ case 980:
+ goto stCase980
+ case 981:
+ goto stCase981
+ case 982:
+ goto stCase982
+ case 983:
+ goto stCase983
+ case 984:
+ goto stCase984
+ case 985:
+ goto stCase985
+ case 986:
+ goto stCase986
+ case 21:
+ goto stCase21
+ case 22:
+ goto stCase22
+ case 23:
+ goto stCase23
+ case 24:
+ goto stCase24
+ case 25:
+ goto stCase25
+ case 26:
+ goto stCase26
+ case 27:
+ goto stCase27
+ case 28:
+ goto stCase28
+ case 29:
+ goto stCase29
+ case 30:
+ goto stCase30
+ case 31:
+ goto stCase31
+ case 32:
+ goto stCase32
+ case 33:
+ goto stCase33
+ case 34:
+ goto stCase34
+ case 35:
+ goto stCase35
+ case 36:
+ goto stCase36
+ case 37:
+ goto stCase37
+ case 38:
+ goto stCase38
+ case 39:
+ goto stCase39
+ case 40:
+ goto stCase40
+ case 41:
+ goto stCase41
+ case 42:
+ goto stCase42
+ case 43:
+ goto stCase43
+ case 44:
+ goto stCase44
+ case 45:
+ goto stCase45
+ case 46:
+ goto stCase46
+ case 47:
+ goto stCase47
+ case 48:
+ goto stCase48
+ case 49:
+ goto stCase49
+ case 50:
+ goto stCase50
+ case 51:
+ goto stCase51
+ case 52:
+ goto stCase52
+ case 53:
+ goto stCase53
+ case 54:
+ goto stCase54
+ case 55:
+ goto stCase55
+ case 56:
+ goto stCase56
+ case 57:
+ goto stCase57
+ case 58:
+ goto stCase58
+ case 59:
+ goto stCase59
+ case 60:
+ goto stCase60
+ case 61:
+ goto stCase61
+ case 62:
+ goto stCase62
+ case 63:
+ goto stCase63
+ case 64:
+ goto stCase64
+ case 65:
+ goto stCase65
+ case 66:
+ goto stCase66
+ case 67:
+ goto stCase67
+ case 68:
+ goto stCase68
+ case 69:
+ goto stCase69
+ case 70:
+ goto stCase70
+ case 71:
+ goto stCase71
+ case 72:
+ goto stCase72
+ case 987:
+ goto stCase987
+ }
+ goto stOut
+ stCase1:
+ if (m.data)[(m.p)] == 60 {
+ goto st2
+ }
+ goto tr0
+ tr0:
+
+ m.err = fmt.Errorf(errPri, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ goto st0
+ tr2:
+
+ m.err = fmt.Errorf(errPrival, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ m.err = fmt.Errorf(errPri, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ goto st0
+ tr7:
+
+ m.err = fmt.Errorf(errTimestamp, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ goto st0
+ tr37:
+
+ m.err = fmt.Errorf(errHostname, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ m.err = fmt.Errorf(errTag, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ goto st0
+ tr72:
+
+ m.err = fmt.Errorf(errRFC3339, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ goto st0
+ tr85:
+
+ m.err = fmt.Errorf(errHostname, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ goto st0
+ tr91:
+
+ m.err = fmt.Errorf(errTag, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ goto st0
+ tr143:
+
+ m.err = fmt.Errorf(errContentStart, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ goto st0
+ tr449:
+
+ m.err = fmt.Errorf(errHostname, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ m.err = fmt.Errorf(errContentStart, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ goto st0
+ stCase0:
+ st0:
+ m.cs = 0
+ goto _out
+ st2:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof2
+ }
+ stCase2:
+ switch (m.data)[(m.p)] {
+ case 48:
+ goto tr3
+ case 49:
+ goto tr4
+ }
+ if 50 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr5
+ }
+ goto tr2
+ tr3:
+
+ m.pb = m.p
+
+ goto st3
+ st3:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof3
+ }
+ stCase3:
+
+ output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
+ output.prioritySet = true
+ if (m.data)[(m.p)] == 62 {
+ goto st4
+ }
+ goto tr2
+ st4:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof4
+ }
+ stCase4:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ switch _widec {
+ case 32:
+ goto st4
+ case 65:
+ goto tr8
+ case 68:
+ goto tr9
+ case 70:
+ goto tr10
+ case 74:
+ goto tr11
+ case 77:
+ goto tr12
+ case 78:
+ goto tr13
+ case 79:
+ goto tr14
+ case 83:
+ goto tr15
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto tr16
+ }
+ goto tr7
+ tr8:
+
+ m.pb = m.p
+
+ goto st5
+ st5:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof5
+ }
+ stCase5:
+ switch (m.data)[(m.p)] {
+ case 112:
+ goto st6
+ case 117:
+ goto st24
+ }
+ goto tr7
+ st6:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof6
+ }
+ stCase6:
+ if (m.data)[(m.p)] == 114 {
+ goto st7
+ }
+ goto tr7
+ st7:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof7
+ }
+ stCase7:
+ if (m.data)[(m.p)] == 32 {
+ goto st8
+ }
+ goto tr7
+ st8:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof8
+ }
+ stCase8:
+ switch (m.data)[(m.p)] {
+ case 32:
+ goto st9
+ case 51:
+ goto st23
+ }
+ if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 50 {
+ goto st22
+ }
+ goto tr7
+ st9:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof9
+ }
+ stCase9:
+ if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto st10
+ }
+ goto tr7
+ st10:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof10
+ }
+ stCase10:
+ if (m.data)[(m.p)] == 32 {
+ goto st11
+ }
+ goto tr7
+ st11:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof11
+ }
+ stCase11:
+ if (m.data)[(m.p)] == 50 {
+ goto st21
+ }
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
+ goto st12
+ }
+ goto tr7
+ st12:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof12
+ }
+ stCase12:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto st13
+ }
+ goto tr7
+ st13:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof13
+ }
+ stCase13:
+ if (m.data)[(m.p)] == 58 {
+ goto st14
+ }
+ goto tr7
+ st14:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof14
+ }
+ stCase14:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
+ goto st15
+ }
+ goto tr7
+ st15:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof15
+ }
+ stCase15:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto st16
+ }
+ goto tr7
+ st16:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof16
+ }
+ stCase16:
+ if (m.data)[(m.p)] == 58 {
+ goto st17
+ }
+ goto tr7
+ st17:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof17
+ }
+ stCase17:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
+ goto st18
+ }
+ goto tr7
+ st18:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof18
+ }
+ stCase18:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto st19
+ }
+ goto tr7
+ st19:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof19
+ }
+ stCase19:
+ if (m.data)[(m.p)] == 32 {
+ goto tr35
+ }
+ goto st0
+ tr35:
+
+ if t, e := time.Parse(time.Stamp, string(m.text())); e != nil {
+ m.err = fmt.Errorf("%s [col %d]", e, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+ } else {
+ if m.timezone != nil {
+ t, _ = time.ParseInLocation(time.Stamp, string(m.text()), m.timezone)
+ }
+ output.timestamp = t.AddDate(m.yyyy, 0, 0)
+ if m.loc != nil {
+ output.timestamp = output.timestamp.In(m.loc)
+ }
+ output.timestampSet = true
+ }
+
+ goto st20
+ tr80:
+
+ if t, e := time.Parse(time.RFC3339, string(m.text())); e != nil {
+ m.err = fmt.Errorf("%s [col %d]", e, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+ } else {
+ output.timestamp = t
+ output.timestampSet = true
+ }
+
+ goto st20
+ st20:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof20
+ }
+ stCase20:
+ switch (m.data)[(m.p)] {
+ case 32:
+ goto tr38
+ case 91:
+ goto tr41
+ case 127:
+ goto tr37
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr37
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr39
+ }
+ default:
+ goto tr39
+ }
+ goto tr40
+ tr38:
+
+ m.pb = m.p
+
+ goto st73
+ st73:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof73
+ }
+ stCase73:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr38
+ case 91:
+ goto tr41
+ case 127:
+ goto tr37
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr37
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr39
+ }
+ default:
+ goto tr39
+ }
+ goto tr40
+ tr84:
+
+ output.message = string(m.text())
+
+ goto st74
+ st74:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof74
+ }
+ stCase74:
+ goto st0
+ tr39:
+
+ m.pb = m.p
+
+ goto st75
+ st75:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof75
+ }
+ stCase75:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr89
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st131
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr92:
+
+ m.pb = m.p
+
+ goto st76
+ tr86:
+
+ output.hostname = string(m.text())
+
+ goto st76
+ st76:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof76
+ }
+ stCase76:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr92
+ case 127:
+ goto tr91
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr91
+ }
+ case (m.data)[(m.p)] > 57:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 92 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr93
+ }
+ case (m.data)[(m.p)] >= 59:
+ goto tr93
+ }
+ default:
+ goto tr93
+ }
+ goto tr40
+ tr93:
+
+ m.pb = m.p
+
+ goto st77
+ st77:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof77
+ }
+ stCase77:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st79
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ tr40:
+
+ m.pb = m.p
+
+ goto st78
+ st78:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof78
+ }
+ stCase78:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 127:
+ goto st0
+ }
+ if (m.data)[(m.p)] <= 31 {
+ goto st0
+ }
+ goto st78
+ st79:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof79
+ }
+ stCase79:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st80
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st80:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof80
+ }
+ stCase80:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st81
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st81:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof81
+ }
+ stCase81:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st82
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st82:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof82
+ }
+ stCase82:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st83
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st83:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof83
+ }
+ stCase83:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st84
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st84:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof84
+ }
+ stCase84:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st85
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st85:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof85
+ }
+ stCase85:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st86
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st86:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof86
+ }
+ stCase86:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st87
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st87:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof87
+ }
+ stCase87:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st88
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st88:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof88
+ }
+ stCase88:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st89
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st89:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof89
+ }
+ stCase89:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st90
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st90:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof90
+ }
+ stCase90:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st91
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st91:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof91
+ }
+ stCase91:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st92
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st92:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof92
+ }
+ stCase92:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st93
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st93:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof93
+ }
+ stCase93:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st94
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st94:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof94
+ }
+ stCase94:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st95
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st95:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof95
+ }
+ stCase95:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st96
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st96:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof96
+ }
+ stCase96:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st97
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st97:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof97
+ }
+ stCase97:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st98
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st98:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof98
+ }
+ stCase98:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st99
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st99:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof99
+ }
+ stCase99:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st100
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st100:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof100
+ }
+ stCase100:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st101
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st101:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof101
+ }
+ stCase101:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st102
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st102:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof102
+ }
+ stCase102:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st103
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st103:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof103
+ }
+ stCase103:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st104
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st104:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof104
+ }
+ stCase104:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st105
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st105:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof105
+ }
+ stCase105:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st106
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st106:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof106
+ }
+ stCase106:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st107
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st107:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof107
+ }
+ stCase107:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st108
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st108:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof108
+ }
+ stCase108:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st109
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st109:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof109
+ }
+ stCase109:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st110
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st110:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof110
+ }
+ stCase110:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st111
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st111:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof111
+ }
+ stCase111:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st112
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st112:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof112
+ }
+ stCase112:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st113
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st113:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof113
+ }
+ stCase113:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st114
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st114:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof114
+ }
+ stCase114:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st115
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st115:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof115
+ }
+ stCase115:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st116
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st116:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof116
+ }
+ stCase116:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st117
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st117:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof117
+ }
+ stCase117:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st118
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st118:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof118
+ }
+ stCase118:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st119
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st119:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof119
+ }
+ stCase119:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st120
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st120:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof120
+ }
+ stCase120:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st121
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st121:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof121
+ }
+ stCase121:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st122
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st122:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof122
+ }
+ stCase122:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st123
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st123:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof123
+ }
+ stCase123:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st124
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st124:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof124
+ }
+ stCase124:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st125
+ }
+ default:
+ goto st0
+ }
+ goto st78
+ st125:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof125
+ }
+ stCase125:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto tr88
+ case 91:
+ goto tr95
+ case 127:
+ goto st0
+ }
+ if (m.data)[(m.p)] <= 31 {
+ goto st0
+ }
+ goto st78
+ tr88:
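+ // Action: record the marked token as the tag (terminated by ':').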
+
+ output.tag = string(m.text())
+
+ goto st126
+ st126:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof126
+ }
+ stCase126:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto st127
+ case 127:
+ goto st0
+ }
+ if (m.data)[(m.p)] <= 31 {
+ goto st0
+ }
+ goto st78
+ st127:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof127
+ }
+ stCase127:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 127:
+ goto st0
+ }
+ if (m.data)[(m.p)] <= 31 {
+ goto st0
+ }
+ goto tr40
+ tr95:
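+ // Action: record the marked token as the tag; a '[' follows, so bracketed content comes next.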
+
+ output.tag = string(m.text())
+
+ goto st128
+ st128:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof128
+ }
+ stCase128:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 93:
+ goto tr145
+ case 127:
+ goto tr143
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr143
+ }
+ case (m.data)[(m.p)] > 90:
+ if 92 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr144
+ }
+ default:
+ goto tr144
+ }
+ goto st78
+ tr144:
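+ // Action: mark the start of the bracketed content (typically a PID).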
+
+ m.pb = m.p
+
+ goto st129
+ st129:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof129
+ }
+ stCase129:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 93:
+ goto tr147
+ case 127:
+ goto tr143
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr143
+ }
+ case (m.data)[(m.p)] > 90:
+ if 92 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st129
+ }
+ default:
+ goto st129
+ }
+ goto st78
+ tr145:
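+ // Action: ']' arrived right after '['; mark the position and capture the bracketed content.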
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st130
+ tr147:
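+ // Action: capture the text between the brackets as the content field.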
+
+ output.content = string(m.text())
+
+ goto st130
+ st130:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof130
+ }
+ stCase130:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 58:
+ goto st126
+ case 127:
+ goto st0
+ }
+ if (m.data)[(m.p)] <= 31 {
+ goto st0
+ }
+ goto st78
+ st131:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof131
+ }
+ stCase131:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr150
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st132
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st132:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof132
+ }
+ stCase132:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr152
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st133
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st133:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof133
+ }
+ stCase133:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr154
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st134
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st134:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof134
+ }
+ stCase134:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr156
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st135
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st135:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof135
+ }
+ stCase135:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr158
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st136
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st136:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof136
+ }
+ stCase136:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr160
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st137
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st137:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof137
+ }
+ stCase137:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr162
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st138
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st138:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof138
+ }
+ stCase138:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr164
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st139
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st139:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof139
+ }
+ stCase139:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr166
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st140
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st140:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof140
+ }
+ stCase140:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr168
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st141
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st141:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof141
+ }
+ stCase141:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr170
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st142
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st142:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof142
+ }
+ stCase142:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr172
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st143
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st143:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof143
+ }
+ stCase143:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr174
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st144
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st144:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof144
+ }
+ stCase144:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr176
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st145
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st145:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof145
+ }
+ stCase145:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr178
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st146
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st146:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof146
+ }
+ stCase146:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr180
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st147
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st147:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof147
+ }
+ stCase147:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr182
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st148
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st148:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof148
+ }
+ stCase148:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr184
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st149
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st149:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof149
+ }
+ stCase149:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr186
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st150
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st150:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof150
+ }
+ stCase150:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr188
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st151
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st151:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof151
+ }
+ stCase151:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr190
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st152
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st152:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof152
+ }
+ stCase152:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr192
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st153
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st153:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof153
+ }
+ stCase153:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr194
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st154
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st154:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof154
+ }
+ stCase154:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr196
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st155
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st155:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof155
+ }
+ stCase155:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr198
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st156
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st156:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof156
+ }
+ stCase156:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr200
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st157
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st157:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof157
+ }
+ stCase157:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr202
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st158
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st158:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof158
+ }
+ stCase158:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr204
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st159
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st159:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof159
+ }
+ stCase159:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr206
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st160
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st160:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof160
+ }
+ stCase160:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr208
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st161
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st161:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof161
+ }
+ stCase161:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr210
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st162
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st162:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof162
+ }
+ stCase162:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr212
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st163
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st163:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof163
+ }
+ stCase163:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr214
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st164
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st164:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof164
+ }
+ stCase164:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr216
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st165
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st165:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof165
+ }
+ stCase165:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr218
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st166
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st166:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof166
+ }
+ stCase166:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr220
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st167
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st167:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof167
+ }
+ stCase167:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr222
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st168
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st168:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof168
+ }
+ stCase168:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr224
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st169
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st169:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof169
+ }
+ stCase169:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr226
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st170
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st170:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof170
+ }
+ stCase170:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr228
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st171
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st171:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof171
+ }
+ stCase171:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr230
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st172
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st172:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof172
+ }
+ stCase172:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr232
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st173
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st173:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof173
+ }
+ stCase173:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr234
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st174
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st174:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof174
+ }
+ stCase174:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr236
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st175
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st175:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof175
+ }
+ stCase175:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr238
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st176
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st176:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof176
+ }
+ stCase176:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr240
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st177
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st177:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof177
+ }
+ stCase177:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr88
+ case 91:
+ goto tr242
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st178
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ st178:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof178
+ }
+ stCase178:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st179
+ }
+ default:
+ goto st179
+ }
+ goto st78
+ st179:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof179
+ }
+ stCase179:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st180
+ }
+ default:
+ goto st180
+ }
+ goto st78
+ st180:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof180
+ }
+ stCase180:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st181
+ }
+ default:
+ goto st181
+ }
+ goto st78
+ st181:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof181
+ }
+ stCase181:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st182
+ }
+ default:
+ goto st182
+ }
+ goto st78
+ st182:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof182
+ }
+ stCase182:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st183
+ }
+ default:
+ goto st183
+ }
+ goto st78
+ st183:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof183
+ }
+ stCase183:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st184
+ }
+ default:
+ goto st184
+ }
+ goto st78
+ st184:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof184
+ }
+ stCase184:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st185
+ }
+ default:
+ goto st185
+ }
+ goto st78
+ st185:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof185
+ }
+ stCase185:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st186
+ }
+ default:
+ goto st186
+ }
+ goto st78
+ st186:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof186
+ }
+ stCase186:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st187
+ }
+ default:
+ goto st187
+ }
+ goto st78
+ st187:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof187
+ }
+ stCase187:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st188
+ }
+ default:
+ goto st188
+ }
+ goto st78
+ st188:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof188
+ }
+ stCase188:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st189
+ }
+ default:
+ goto st189
+ }
+ goto st78
+ st189:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof189
+ }
+ stCase189:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st190
+ }
+ default:
+ goto st190
+ }
+ goto st78
+ st190:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof190
+ }
+ stCase190:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st191
+ }
+ default:
+ goto st191
+ }
+ goto st78
+ st191:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof191
+ }
+ stCase191:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st192
+ }
+ default:
+ goto st192
+ }
+ goto st78
+ st192:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof192
+ }
+ stCase192:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st193
+ }
+ default:
+ goto st193
+ }
+ goto st78
+ st193:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof193
+ }
+ stCase193:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st194
+ }
+ default:
+ goto st194
+ }
+ goto st78
+ st194:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof194
+ }
+ stCase194:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st195
+ }
+ default:
+ goto st195
+ }
+ goto st78
+ st195:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof195
+ }
+ stCase195:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st196
+ }
+ default:
+ goto st196
+ }
+ goto st78
+ st196:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof196
+ }
+ stCase196:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st197
+ }
+ default:
+ goto st197
+ }
+ goto st78
+ st197:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof197
+ }
+ stCase197:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st198
+ }
+ default:
+ goto st198
+ }
+ goto st78
+ st198:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof198
+ }
+ stCase198:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st199
+ }
+ default:
+ goto st199
+ }
+ goto st78
+ st199:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof199
+ }
+ stCase199:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st200
+ }
+ default:
+ goto st200
+ }
+ goto st78
+ st200:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof200
+ }
+ stCase200:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st201
+ }
+ default:
+ goto st201
+ }
+ goto st78
+ st201:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof201
+ }
+ stCase201:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st202
+ }
+ default:
+ goto st202
+ }
+ goto st78
+ st202:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof202
+ }
+ stCase202:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st203
+ }
+ default:
+ goto st203
+ }
+ goto st78
+ st203:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof203
+ }
+ stCase203:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st204
+ }
+ default:
+ goto st204
+ }
+ goto st78
+ st204:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof204
+ }
+ stCase204:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st205
+ }
+ default:
+ goto st205
+ }
+ goto st78
+ st205:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof205
+ }
+ stCase205:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st206
+ }
+ default:
+ goto st206
+ }
+ goto st78
+ st206:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof206
+ }
+ stCase206:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st207
+ }
+ default:
+ goto st207
+ }
+ goto st78
+ st207:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof207
+ }
+ stCase207:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st208
+ }
+ default:
+ goto st208
+ }
+ goto st78
+ st208:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof208
+ }
+ stCase208:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st209
+ }
+ default:
+ goto st209
+ }
+ goto st78
+ st209:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof209
+ }
+ stCase209:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st210
+ }
+ default:
+ goto st210
+ }
+ goto st78
+ st210:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof210
+ }
+ stCase210:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st211
+ }
+ default:
+ goto st211
+ }
+ goto st78
+ st211:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof211
+ }
+ stCase211:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st212
+ }
+ default:
+ goto st212
+ }
+ goto st78
+ st212:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof212
+ }
+ stCase212:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st213
+ }
+ default:
+ goto st213
+ }
+ goto st78
+ st213:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof213
+ }
+ stCase213:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st214
+ }
+ default:
+ goto st214
+ }
+ goto st78
+ st214:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof214
+ }
+ stCase214:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st215
+ }
+ default:
+ goto st215
+ }
+ goto st78
+ st215:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof215
+ }
+ stCase215:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st216
+ }
+ default:
+ goto st216
+ }
+ goto st78
+ st216:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof216
+ }
+ stCase216:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st217
+ }
+ default:
+ goto st217
+ }
+ goto st78
+ st217:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof217
+ }
+ stCase217:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st218
+ }
+ default:
+ goto st218
+ }
+ goto st78
+ st218:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof218
+ }
+ stCase218:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st219
+ }
+ default:
+ goto st219
+ }
+ goto st78
+ st219:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof219
+ }
+ stCase219:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st220
+ }
+ default:
+ goto st220
+ }
+ goto st78
+ st220:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof220
+ }
+ stCase220:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st221
+ }
+ default:
+ goto st221
+ }
+ goto st78
+ st221:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof221
+ }
+ stCase221:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st222
+ }
+ default:
+ goto st222
+ }
+ goto st78
+ st222:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof222
+ }
+ stCase222:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st223
+ }
+ default:
+ goto st223
+ }
+ goto st78
+ st223:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof223
+ }
+ stCase223:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st224
+ }
+ default:
+ goto st224
+ }
+ goto st78
+ st224:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof224
+ }
+ stCase224:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st225
+ }
+ default:
+ goto st225
+ }
+ goto st78
+ st225:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof225
+ }
+ stCase225:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st226
+ }
+ default:
+ goto st226
+ }
+ goto st78
+ st226:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof226
+ }
+ stCase226:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st227
+ }
+ default:
+ goto st227
+ }
+ goto st78
+ st227:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof227
+ }
+ stCase227:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st228
+ }
+ default:
+ goto st228
+ }
+ goto st78
+ st228:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof228
+ }
+ stCase228:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st229
+ }
+ default:
+ goto st229
+ }
+ goto st78
+ st229:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof229
+ }
+ stCase229:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st230
+ }
+ default:
+ goto st230
+ }
+ goto st78
+ st230:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof230
+ }
+ stCase230:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st231
+ }
+ default:
+ goto st231
+ }
+ goto st78
+ st231:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof231
+ }
+ stCase231:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st232
+ }
+ default:
+ goto st232
+ }
+ goto st78
+ st232:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof232
+ }
+ stCase232:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st233
+ }
+ default:
+ goto st233
+ }
+ goto st78
+ st233:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof233
+ }
+ stCase233:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st234
+ }
+ default:
+ goto st234
+ }
+ goto st78
+ st234:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof234
+ }
+ stCase234:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st235
+ }
+ default:
+ goto st235
+ }
+ goto st78
+ st235:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof235
+ }
+ stCase235:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st236
+ }
+ default:
+ goto st236
+ }
+ goto st78
+ st236:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof236
+ }
+ stCase236:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st237
+ }
+ default:
+ goto st237
+ }
+ goto st78
+ st237:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof237
+ }
+ stCase237:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st238
+ }
+ default:
+ goto st238
+ }
+ goto st78
+ st238:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof238
+ }
+ stCase238:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st239
+ }
+ default:
+ goto st239
+ }
+ goto st78
+ st239:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof239
+ }
+ stCase239:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st240
+ }
+ default:
+ goto st240
+ }
+ goto st78
+ st240:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof240
+ }
+ stCase240:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st241
+ }
+ default:
+ goto st241
+ }
+ goto st78
+ st241:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof241
+ }
+ stCase241:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st242
+ }
+ default:
+ goto st242
+ }
+ goto st78
+ st242:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof242
+ }
+ stCase242:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st243
+ }
+ default:
+ goto st243
+ }
+ goto st78
+ st243:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof243
+ }
+ stCase243:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st244
+ }
+ default:
+ goto st244
+ }
+ goto st78
+ st244:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof244
+ }
+ stCase244:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st245
+ }
+ default:
+ goto st245
+ }
+ goto st78
+ st245:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof245
+ }
+ stCase245:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st246
+ }
+ default:
+ goto st246
+ }
+ goto st78
+ st246:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof246
+ }
+ stCase246:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st247
+ }
+ default:
+ goto st247
+ }
+ goto st78
+ st247:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof247
+ }
+ stCase247:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st248
+ }
+ default:
+ goto st248
+ }
+ goto st78
+ st248:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof248
+ }
+ stCase248:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st249
+ }
+ default:
+ goto st249
+ }
+ goto st78
+ st249:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof249
+ }
+ stCase249:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st250
+ }
+ default:
+ goto st250
+ }
+ goto st78
+ st250:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof250
+ }
+ stCase250:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st251
+ }
+ default:
+ goto st251
+ }
+ goto st78
+ st251:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof251
+ }
+ stCase251:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st252
+ }
+ default:
+ goto st252
+ }
+ goto st78
+ st252:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof252
+ }
+ stCase252:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st253
+ }
+ default:
+ goto st253
+ }
+ goto st78
+ st253:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof253
+ }
+ stCase253:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st254
+ }
+ default:
+ goto st254
+ }
+ goto st78
+ st254:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof254
+ }
+ stCase254:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st255
+ }
+ default:
+ goto st255
+ }
+ goto st78
+ st255:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof255
+ }
+ stCase255:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st256
+ }
+ default:
+ goto st256
+ }
+ goto st78
+ st256:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof256
+ }
+ stCase256:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st257
+ }
+ default:
+ goto st257
+ }
+ goto st78
+ st257:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof257
+ }
+ stCase257:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st258
+ }
+ default:
+ goto st258
+ }
+ goto st78
+ st258:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof258
+ }
+ stCase258:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st259
+ }
+ default:
+ goto st259
+ }
+ goto st78
+ st259:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof259
+ }
+ stCase259:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st260
+ }
+ default:
+ goto st260
+ }
+ goto st78
+ st260:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof260
+ }
+ stCase260:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st261
+ }
+ default:
+ goto st261
+ }
+ goto st78
+ st261:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof261
+ }
+ stCase261:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st262
+ }
+ default:
+ goto st262
+ }
+ goto st78
+ st262:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof262
+ }
+ stCase262:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st263
+ }
+ default:
+ goto st263
+ }
+ goto st78
+ st263:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof263
+ }
+ stCase263:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st264
+ }
+ default:
+ goto st264
+ }
+ goto st78
+ st264:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof264
+ }
+ stCase264:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st265
+ }
+ default:
+ goto st265
+ }
+ goto st78
+ st265:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof265
+ }
+ stCase265:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st266
+ }
+ default:
+ goto st266
+ }
+ goto st78
+ st266:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof266
+ }
+ stCase266:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st267
+ }
+ default:
+ goto st267
+ }
+ goto st78
+ st267:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof267
+ }
+ stCase267:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st268
+ }
+ default:
+ goto st268
+ }
+ goto st78
+ st268:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof268
+ }
+ stCase268:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st269
+ }
+ default:
+ goto st269
+ }
+ goto st78
+ st269:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof269
+ }
+ stCase269:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st270
+ }
+ default:
+ goto st270
+ }
+ goto st78
+ st270:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof270
+ }
+ stCase270:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st271
+ }
+ default:
+ goto st271
+ }
+ goto st78
+ st271:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof271
+ }
+ stCase271:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st272
+ }
+ default:
+ goto st272
+ }
+ goto st78
+ st272:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof272
+ }
+ stCase272:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st273
+ }
+ default:
+ goto st273
+ }
+ goto st78
+ st273:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof273
+ }
+ stCase273:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st274
+ }
+ default:
+ goto st274
+ }
+ goto st78
+ st274:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof274
+ }
+ stCase274:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st275
+ }
+ default:
+ goto st275
+ }
+ goto st78
+ st275:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof275
+ }
+ stCase275:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st276
+ }
+ default:
+ goto st276
+ }
+ goto st78
+ st276:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof276
+ }
+ stCase276:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st277
+ }
+ default:
+ goto st277
+ }
+ goto st78
+ st277:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof277
+ }
+ stCase277:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st278
+ }
+ default:
+ goto st278
+ }
+ goto st78
+ st278:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof278
+ }
+ stCase278:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st279
+ }
+ default:
+ goto st279
+ }
+ goto st78
+ st279:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof279
+ }
+ stCase279:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st280
+ }
+ default:
+ goto st280
+ }
+ goto st78
+ st280:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof280
+ }
+ stCase280:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st281
+ }
+ default:
+ goto st281
+ }
+ goto st78
+ st281:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof281
+ }
+ stCase281:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st282
+ }
+ default:
+ goto st282
+ }
+ goto st78
+ st282:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof282
+ }
+ stCase282:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st283
+ }
+ default:
+ goto st283
+ }
+ goto st78
+ st283:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof283
+ }
+ stCase283:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st284
+ }
+ default:
+ goto st284
+ }
+ goto st78
+ st284:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof284
+ }
+ stCase284:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st285
+ }
+ default:
+ goto st285
+ }
+ goto st78
+ st285:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof285
+ }
+ stCase285:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st286
+ }
+ default:
+ goto st286
+ }
+ goto st78
+ st286:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof286
+ }
+ stCase286:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st287
+ }
+ default:
+ goto st287
+ }
+ goto st78
+ st287:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof287
+ }
+ stCase287:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st288
+ }
+ default:
+ goto st288
+ }
+ goto st78
+ st288:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof288
+ }
+ stCase288:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st289
+ }
+ default:
+ goto st289
+ }
+ goto st78
+ st289:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof289
+ }
+ stCase289:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st290
+ }
+ default:
+ goto st290
+ }
+ goto st78
+ st290:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof290
+ }
+ stCase290:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st291
+ }
+ default:
+ goto st291
+ }
+ goto st78
+ st291:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof291
+ }
+ stCase291:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st292
+ }
+ default:
+ goto st292
+ }
+ goto st78
+ st292:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof292
+ }
+ stCase292:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st293
+ }
+ default:
+ goto st293
+ }
+ goto st78
+ st293:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof293
+ }
+ stCase293:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st294
+ }
+ default:
+ goto st294
+ }
+ goto st78
+ st294:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof294
+ }
+ stCase294:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st295
+ }
+ default:
+ goto st295
+ }
+ goto st78
+ st295:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof295
+ }
+ stCase295:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st296
+ }
+ default:
+ goto st296
+ }
+ goto st78
+ st296:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof296
+ }
+ stCase296:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st297
+ }
+ default:
+ goto st297
+ }
+ goto st78
+ st297:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof297
+ }
+ stCase297:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st298
+ }
+ default:
+ goto st298
+ }
+ goto st78
+ st298:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof298
+ }
+ stCase298:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st299
+ }
+ default:
+ goto st299
+ }
+ goto st78
+ st299:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof299
+ }
+ stCase299:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st300
+ }
+ default:
+ goto st300
+ }
+ goto st78
+ st300:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof300
+ }
+ stCase300:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st301
+ }
+ default:
+ goto st301
+ }
+ goto st78
+ st301:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof301
+ }
+ stCase301:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st302
+ }
+ default:
+ goto st302
+ }
+ goto st78
+ st302:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof302
+ }
+ stCase302:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st303
+ }
+ default:
+ goto st303
+ }
+ goto st78
+ st303:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof303
+ }
+ stCase303:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st304
+ }
+ default:
+ goto st304
+ }
+ goto st78
+ st304:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof304
+ }
+ stCase304:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st305
+ }
+ default:
+ goto st305
+ }
+ goto st78
+ st305:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof305
+ }
+ stCase305:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st306
+ }
+ default:
+ goto st306
+ }
+ goto st78
+ st306:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof306
+ }
+ stCase306:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st307
+ }
+ default:
+ goto st307
+ }
+ goto st78
+ st307:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof307
+ }
+ stCase307:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st308
+ }
+ default:
+ goto st308
+ }
+ goto st78
+ st308:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof308
+ }
+ stCase308:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st309
+ }
+ default:
+ goto st309
+ }
+ goto st78
+ st309:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof309
+ }
+ stCase309:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st310
+ }
+ default:
+ goto st310
+ }
+ goto st78
+ st310:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof310
+ }
+ stCase310:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st311
+ }
+ default:
+ goto st311
+ }
+ goto st78
+ st311:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof311
+ }
+ stCase311:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st312
+ }
+ default:
+ goto st312
+ }
+ goto st78
+ st312:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof312
+ }
+ stCase312:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st313
+ }
+ default:
+ goto st313
+ }
+ goto st78
+ st313:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof313
+ }
+ stCase313:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st314
+ }
+ default:
+ goto st314
+ }
+ goto st78
+ st314:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof314
+ }
+ stCase314:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st315
+ }
+ default:
+ goto st315
+ }
+ goto st78
+ st315:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof315
+ }
+ stCase315:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st316
+ }
+ default:
+ goto st316
+ }
+ goto st78
+ st316:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof316
+ }
+ stCase316:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st317
+ }
+ default:
+ goto st317
+ }
+ goto st78
+ st317:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof317
+ }
+ stCase317:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st318
+ }
+ default:
+ goto st318
+ }
+ goto st78
+ st318:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof318
+ }
+ stCase318:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st319
+ }
+ default:
+ goto st319
+ }
+ goto st78
+ st319:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof319
+ }
+ stCase319:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st320
+ }
+ default:
+ goto st320
+ }
+ goto st78
+ st320:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof320
+ }
+ stCase320:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st321
+ }
+ default:
+ goto st321
+ }
+ goto st78
+ st321:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof321
+ }
+ stCase321:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st322
+ }
+ default:
+ goto st322
+ }
+ goto st78
+ st322:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof322
+ }
+ stCase322:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st323
+ }
+ default:
+ goto st323
+ }
+ goto st78
+ st323:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof323
+ }
+ stCase323:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st324
+ }
+ default:
+ goto st324
+ }
+ goto st78
+ st324:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof324
+ }
+ stCase324:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st325
+ }
+ default:
+ goto st325
+ }
+ goto st78
+ st325:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof325
+ }
+ stCase325:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st326
+ }
+ default:
+ goto st326
+ }
+ goto st78
+ st326:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof326
+ }
+ stCase326:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st327
+ }
+ default:
+ goto st327
+ }
+ goto st78
+ st327:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof327
+ }
+ stCase327:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st328
+ }
+ default:
+ goto st328
+ }
+ goto st78
+ st328:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof328
+ }
+ stCase328:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st329
+ }
+ default:
+ goto st329
+ }
+ goto st78
+ st329:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof329
+ }
+ stCase329:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st330
+ }
+ default:
+ goto st330
+ }
+ goto st78
+ st330:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof330
+ }
+ stCase330:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st331
+ }
+ default:
+ goto st331
+ }
+ goto st78
+ st331:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof331
+ }
+ stCase331:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st332
+ }
+ default:
+ goto st332
+ }
+ goto st78
+ st332:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof332
+ }
+ stCase332:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st333
+ }
+ default:
+ goto st333
+ }
+ goto st78
+ st333:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof333
+ }
+ stCase333:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st334
+ }
+ default:
+ goto st334
+ }
+ goto st78
+ st334:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof334
+ }
+ stCase334:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st335
+ }
+ default:
+ goto st335
+ }
+ goto st78
+ st335:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof335
+ }
+ stCase335:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st336
+ }
+ default:
+ goto st336
+ }
+ goto st78
+ st336:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof336
+ }
+ stCase336:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st337
+ }
+ default:
+ goto st337
+ }
+ goto st78
+ st337:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof337
+ }
+ stCase337:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st338
+ }
+ default:
+ goto st338
+ }
+ goto st78
+ st338:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof338
+ }
+ stCase338:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st339
+ }
+ default:
+ goto st339
+ }
+ goto st78
+ st339:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof339
+ }
+ stCase339:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st340
+ }
+ default:
+ goto st340
+ }
+ goto st78
+ st340:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof340
+ }
+ stCase340:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st341
+ }
+ default:
+ goto st341
+ }
+ goto st78
+ st341:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof341
+ }
+ stCase341:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st342
+ }
+ default:
+ goto st342
+ }
+ goto st78
+ st342:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof342
+ }
+ stCase342:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st343
+ }
+ default:
+ goto st343
+ }
+ goto st78
+ st343:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof343
+ }
+ stCase343:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st344
+ }
+ default:
+ goto st344
+ }
+ goto st78
+ st344:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof344
+ }
+ stCase344:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st345
+ }
+ default:
+ goto st345
+ }
+ goto st78
+ st345:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof345
+ }
+ stCase345:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st346
+ }
+ default:
+ goto st346
+ }
+ goto st78
+ st346:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof346
+ }
+ stCase346:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st347
+ }
+ default:
+ goto st347
+ }
+ goto st78
+ st347:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof347
+ }
+ stCase347:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st348
+ }
+ default:
+ goto st348
+ }
+ goto st78
+ st348:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof348
+ }
+ stCase348:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st349
+ }
+ default:
+ goto st349
+ }
+ goto st78
+ st349:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof349
+ }
+ stCase349:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st350
+ }
+ default:
+ goto st350
+ }
+ goto st78
+ st350:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof350
+ }
+ stCase350:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st351
+ }
+ default:
+ goto st351
+ }
+ goto st78
+ st351:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof351
+ }
+ stCase351:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st352
+ }
+ default:
+ goto st352
+ }
+ goto st78
+ st352:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof352
+ }
+ stCase352:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st353
+ }
+ default:
+ goto st353
+ }
+ goto st78
+ st353:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof353
+ }
+ stCase353:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st354
+ }
+ default:
+ goto st354
+ }
+ goto st78
+ st354:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof354
+ }
+ stCase354:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st355
+ }
+ default:
+ goto st355
+ }
+ goto st78
+ st355:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof355
+ }
+ stCase355:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st356
+ }
+ default:
+ goto st356
+ }
+ goto st78
+ st356:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof356
+ }
+ stCase356:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st357
+ }
+ default:
+ goto st357
+ }
+ goto st78
+ st357:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof357
+ }
+ stCase357:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st358
+ }
+ default:
+ goto st358
+ }
+ goto st78
+ st358:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof358
+ }
+ stCase358:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st359
+ }
+ default:
+ goto st359
+ }
+ goto st78
+ st359:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof359
+ }
+ stCase359:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st360
+ }
+ default:
+ goto st360
+ }
+ goto st78
+ st360:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof360
+ }
+ stCase360:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st361
+ }
+ default:
+ goto st361
+ }
+ goto st78
+ st361:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof361
+ }
+ stCase361:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st362
+ }
+ default:
+ goto st362
+ }
+ goto st78
+ st362:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof362
+ }
+ stCase362:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st363
+ }
+ default:
+ goto st363
+ }
+ goto st78
+ st363:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof363
+ }
+ stCase363:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st364
+ }
+ default:
+ goto st364
+ }
+ goto st78
+ st364:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof364
+ }
+ stCase364:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st365
+ }
+ default:
+ goto st365
+ }
+ goto st78
+ st365:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof365
+ }
+ stCase365:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st366
+ }
+ default:
+ goto st366
+ }
+ goto st78
+ st366:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof366
+ }
+ stCase366:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st367
+ }
+ default:
+ goto st367
+ }
+ goto st78
+ st367:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof367
+ }
+ stCase367:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st368
+ }
+ default:
+ goto st368
+ }
+ goto st78
+ st368:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof368
+ }
+ stCase368:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st369
+ }
+ default:
+ goto st369
+ }
+ goto st78
+ st369:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof369
+ }
+ stCase369:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st370
+ }
+ default:
+ goto st370
+ }
+ goto st78
+ st370:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof370
+ }
+ stCase370:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st371
+ }
+ default:
+ goto st371
+ }
+ goto st78
+ st371:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof371
+ }
+ stCase371:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st372
+ }
+ default:
+ goto st372
+ }
+ goto st78
+ st372:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof372
+ }
+ stCase372:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st373
+ }
+ default:
+ goto st373
+ }
+ goto st78
+ st373:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof373
+ }
+ stCase373:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st374
+ }
+ default:
+ goto st374
+ }
+ goto st78
+ st374:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof374
+ }
+ stCase374:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st375
+ }
+ default:
+ goto st375
+ }
+ goto st78
+ st375:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof375
+ }
+ stCase375:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st376
+ }
+ default:
+ goto st376
+ }
+ goto st78
+ st376:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof376
+ }
+ stCase376:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st377
+ }
+ default:
+ goto st377
+ }
+ goto st78
+ st377:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof377
+ }
+ stCase377:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st378
+ }
+ default:
+ goto st378
+ }
+ goto st78
+ st378:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof378
+ }
+ stCase378:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st379
+ }
+ default:
+ goto st379
+ }
+ goto st78
+ st379:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof379
+ }
+ stCase379:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st380
+ }
+ default:
+ goto st380
+ }
+ goto st78
+ st380:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof380
+ }
+ stCase380:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st381
+ }
+ default:
+ goto st381
+ }
+ goto st78
+ st381:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof381
+ }
+ stCase381:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st382
+ }
+ default:
+ goto st382
+ }
+ goto st78
+ st382:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof382
+ }
+ stCase382:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st383
+ }
+ default:
+ goto st383
+ }
+ goto st78
+ st383:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof383
+ }
+ stCase383:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st384
+ }
+ default:
+ goto st384
+ }
+ goto st78
+ st384:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof384
+ }
+ stCase384:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ goto st78
+ tr242:
+
+ output.tag = string(m.text())
+
+ goto st385
+ st385:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof385
+ }
+ stCase385:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st179
+ case 93:
+ goto tr451
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr450
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr450:
+
+ m.pb = m.p
+
+ goto st386
+ st386:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof386
+ }
+ stCase386:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st180
+ case 93:
+ goto tr453
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st387
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st387:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof387
+ }
+ stCase387:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st181
+ case 93:
+ goto tr455
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st388
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st388:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof388
+ }
+ stCase388:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st182
+ case 93:
+ goto tr457
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st389
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st389:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof389
+ }
+ stCase389:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st183
+ case 93:
+ goto tr459
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st390
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st390:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof390
+ }
+ stCase390:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st184
+ case 93:
+ goto tr461
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st391
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st391:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof391
+ }
+ stCase391:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st185
+ case 93:
+ goto tr463
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st392
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st392:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof392
+ }
+ stCase392:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st186
+ case 93:
+ goto tr465
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st393
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st393:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof393
+ }
+ stCase393:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st187
+ case 93:
+ goto tr467
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st394
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st394:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof394
+ }
+ stCase394:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st188
+ case 93:
+ goto tr469
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st395
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st395:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof395
+ }
+ stCase395:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st189
+ case 93:
+ goto tr471
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st396
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st396:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof396
+ }
+ stCase396:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st190
+ case 93:
+ goto tr473
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st397
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st397:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof397
+ }
+ stCase397:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st191
+ case 93:
+ goto tr475
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st398
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st398:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof398
+ }
+ stCase398:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st192
+ case 93:
+ goto tr477
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st399
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st399:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof399
+ }
+ stCase399:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st193
+ case 93:
+ goto tr479
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st400
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st400:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof400
+ }
+ stCase400:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st194
+ case 93:
+ goto tr481
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st401
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st401:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof401
+ }
+ stCase401:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st195
+ case 93:
+ goto tr483
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st402
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st402:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof402
+ }
+ stCase402:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st196
+ case 93:
+ goto tr485
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st403
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st403:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof403
+ }
+ stCase403:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st197
+ case 93:
+ goto tr487
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st404
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st404:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof404
+ }
+ stCase404:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st198
+ case 93:
+ goto tr489
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st405
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st405:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof405
+ }
+ stCase405:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st199
+ case 93:
+ goto tr491
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st406
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st406:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof406
+ }
+ stCase406:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st200
+ case 93:
+ goto tr493
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st407
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st407:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof407
+ }
+ stCase407:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st201
+ case 93:
+ goto tr495
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st408
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st408:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof408
+ }
+ stCase408:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st202
+ case 93:
+ goto tr497
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st409
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st409:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof409
+ }
+ stCase409:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st203
+ case 93:
+ goto tr499
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st410
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st410:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof410
+ }
+ stCase410:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st204
+ case 93:
+ goto tr501
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st411
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st411:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof411
+ }
+ stCase411:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st205
+ case 93:
+ goto tr503
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st412
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st412:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof412
+ }
+ stCase412:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st206
+ case 93:
+ goto tr505
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st413
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st413:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof413
+ }
+ stCase413:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st207
+ case 93:
+ goto tr507
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st414
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st414:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof414
+ }
+ stCase414:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st208
+ case 93:
+ goto tr509
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st415
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st415:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof415
+ }
+ stCase415:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st209
+ case 93:
+ goto tr511
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st416
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st416:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof416
+ }
+ stCase416:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st210
+ case 93:
+ goto tr513
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st417
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st417:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof417
+ }
+ stCase417:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st211
+ case 93:
+ goto tr515
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st418
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st418:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof418
+ }
+ stCase418:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st212
+ case 93:
+ goto tr517
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st419
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st419:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof419
+ }
+ stCase419:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st213
+ case 93:
+ goto tr519
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st420
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st420:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof420
+ }
+ stCase420:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st214
+ case 93:
+ goto tr521
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st421
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st421:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof421
+ }
+ stCase421:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st215
+ case 93:
+ goto tr523
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st422
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st422:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof422
+ }
+ stCase422:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st216
+ case 93:
+ goto tr525
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st423
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st423:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof423
+ }
+ stCase423:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st217
+ case 93:
+ goto tr527
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st424
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st424:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof424
+ }
+ stCase424:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st218
+ case 93:
+ goto tr529
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st425
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st425:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof425
+ }
+ stCase425:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st219
+ case 93:
+ goto tr531
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st426
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st426:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof426
+ }
+ stCase426:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st220
+ case 93:
+ goto tr533
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st427
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st427:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof427
+ }
+ stCase427:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st221
+ case 93:
+ goto tr535
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st428
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st428:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof428
+ }
+ stCase428:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st222
+ case 93:
+ goto tr537
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st429
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st429:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof429
+ }
+ stCase429:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st223
+ case 93:
+ goto tr539
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st430
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st430:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof430
+ }
+ stCase430:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st224
+ case 93:
+ goto tr541
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st431
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st431:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof431
+ }
+ stCase431:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st225
+ case 93:
+ goto tr543
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st432
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st432:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof432
+ }
+ stCase432:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st226
+ case 93:
+ goto tr545
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st433
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st433:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof433
+ }
+ stCase433:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st227
+ case 93:
+ goto tr547
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st434
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st434:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof434
+ }
+ stCase434:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st228
+ case 93:
+ goto tr549
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st435
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st435:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof435
+ }
+ stCase435:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st229
+ case 93:
+ goto tr551
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st436
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st436:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof436
+ }
+ stCase436:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st230
+ case 93:
+ goto tr553
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st437
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st437:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof437
+ }
+ stCase437:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st231
+ case 93:
+ goto tr555
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st438
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st438:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof438
+ }
+ stCase438:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st232
+ case 93:
+ goto tr557
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st439
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st439:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof439
+ }
+ stCase439:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st233
+ case 93:
+ goto tr559
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st440
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st440:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof440
+ }
+ stCase440:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st234
+ case 93:
+ goto tr561
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st441
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st441:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof441
+ }
+ stCase441:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st235
+ case 93:
+ goto tr563
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st442
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st442:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof442
+ }
+ stCase442:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st236
+ case 93:
+ goto tr565
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st443
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st443:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof443
+ }
+ stCase443:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st237
+ case 93:
+ goto tr567
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st444
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st444:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof444
+ }
+ stCase444:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st238
+ case 93:
+ goto tr569
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st445
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st445:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof445
+ }
+ stCase445:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st239
+ case 93:
+ goto tr571
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st446
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st446:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof446
+ }
+ stCase446:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st240
+ case 93:
+ goto tr573
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st447
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st447:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof447
+ }
+ stCase447:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st241
+ case 93:
+ goto tr575
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st448
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st448:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof448
+ }
+ stCase448:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st242
+ case 93:
+ goto tr577
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st449
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st449:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof449
+ }
+ stCase449:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st243
+ case 93:
+ goto tr579
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st450
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st450:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof450
+ }
+ stCase450:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st244
+ case 93:
+ goto tr581
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st451
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st451:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof451
+ }
+ stCase451:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st245
+ case 93:
+ goto tr583
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st452
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st452:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof452
+ }
+ stCase452:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st246
+ case 93:
+ goto tr585
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st453
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st453:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof453
+ }
+ stCase453:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st247
+ case 93:
+ goto tr587
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st454
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st454:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof454
+ }
+ stCase454:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st248
+ case 93:
+ goto tr589
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st455
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st455:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof455
+ }
+ stCase455:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st249
+ case 93:
+ goto tr591
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st456
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st456:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof456
+ }
+ stCase456:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st250
+ case 93:
+ goto tr593
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st457
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st457:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof457
+ }
+ stCase457:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st251
+ case 93:
+ goto tr595
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st458
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st458:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof458
+ }
+ stCase458:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st252
+ case 93:
+ goto tr597
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st459
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st459:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof459
+ }
+ stCase459:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st253
+ case 93:
+ goto tr599
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st460
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st460:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof460
+ }
+ stCase460:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st254
+ case 93:
+ goto tr601
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st461
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st461:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof461
+ }
+ stCase461:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st255
+ case 93:
+ goto tr603
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st462
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st462:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof462
+ }
+ stCase462:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st256
+ case 93:
+ goto tr605
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st463
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st463:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof463
+ }
+ stCase463:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st257
+ case 93:
+ goto tr607
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st464
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st464:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof464
+ }
+ stCase464:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st258
+ case 93:
+ goto tr609
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st465
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st465:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof465
+ }
+ stCase465:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st259
+ case 93:
+ goto tr611
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st466
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st466:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof466
+ }
+ stCase466:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st260
+ case 93:
+ goto tr613
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st467
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st467:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof467
+ }
+ stCase467:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st261
+ case 93:
+ goto tr615
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st468
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st468:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof468
+ }
+ stCase468:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st262
+ case 93:
+ goto tr617
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st469
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st469:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof469
+ }
+ stCase469:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st263
+ case 93:
+ goto tr619
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st470
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st470:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof470
+ }
+ stCase470:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st264
+ case 93:
+ goto tr621
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st471
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st471:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof471
+ }
+ stCase471:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st265
+ case 93:
+ goto tr623
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st472
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st472:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof472
+ }
+ stCase472:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st266
+ case 93:
+ goto tr625
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st473
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st473:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof473
+ }
+ stCase473:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st267
+ case 93:
+ goto tr627
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st474
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st474:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof474
+ }
+ stCase474:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st268
+ case 93:
+ goto tr629
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st475
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st475:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof475
+ }
+ stCase475:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st269
+ case 93:
+ goto tr631
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st476
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st476:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof476
+ }
+ stCase476:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st270
+ case 93:
+ goto tr633
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st477
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st477:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof477
+ }
+ stCase477:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st271
+ case 93:
+ goto tr635
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st478
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st478:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof478
+ }
+ stCase478:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st272
+ case 93:
+ goto tr637
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st479
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st479:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof479
+ }
+ stCase479:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st273
+ case 93:
+ goto tr639
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st480
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st480:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof480
+ }
+ stCase480:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st274
+ case 93:
+ goto tr641
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st481
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st481:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof481
+ }
+ stCase481:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st275
+ case 93:
+ goto tr643
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st482
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st482:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof482
+ }
+ stCase482:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st276
+ case 93:
+ goto tr645
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st483
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st483:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof483
+ }
+ stCase483:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st277
+ case 93:
+ goto tr647
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st484
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st484:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof484
+ }
+ stCase484:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st278
+ case 93:
+ goto tr649
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st485
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st485:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof485
+ }
+ stCase485:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st279
+ case 93:
+ goto tr651
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st486
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st486:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof486
+ }
+ stCase486:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st280
+ case 93:
+ goto tr653
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st487
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st487:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof487
+ }
+ stCase487:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st281
+ case 93:
+ goto tr655
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st488
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st488:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof488
+ }
+ stCase488:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st282
+ case 93:
+ goto tr657
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st489
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st489:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof489
+ }
+ stCase489:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st283
+ case 93:
+ goto tr659
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st490
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st490:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof490
+ }
+ stCase490:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st284
+ case 93:
+ goto tr661
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st491
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st491:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof491
+ }
+ stCase491:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st285
+ case 93:
+ goto tr663
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st492
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st492:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof492
+ }
+ stCase492:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st286
+ case 93:
+ goto tr665
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st493
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st493:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof493
+ }
+ stCase493:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st287
+ case 93:
+ goto tr667
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st494
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st494:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof494
+ }
+ stCase494:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st288
+ case 93:
+ goto tr669
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st495
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st495:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof495
+ }
+ stCase495:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st289
+ case 93:
+ goto tr671
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st496
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st496:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof496
+ }
+ stCase496:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st290
+ case 93:
+ goto tr673
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st497
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st497:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof497
+ }
+ stCase497:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st291
+ case 93:
+ goto tr675
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st498
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st498:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof498
+ }
+ stCase498:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st292
+ case 93:
+ goto tr677
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st499
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st499:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof499
+ }
+ stCase499:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st293
+ case 93:
+ goto tr679
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st500
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st500:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof500
+ }
+ stCase500:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st294
+ case 93:
+ goto tr681
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st501
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st501:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof501
+ }
+ stCase501:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st295
+ case 93:
+ goto tr683
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st502
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st502:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof502
+ }
+ stCase502:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st296
+ case 93:
+ goto tr685
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st503
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st503:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof503
+ }
+ stCase503:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st297
+ case 93:
+ goto tr687
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st504
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st504:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof504
+ }
+ stCase504:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st298
+ case 93:
+ goto tr689
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st505
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st505:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof505
+ }
+ stCase505:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st299
+ case 93:
+ goto tr691
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st506
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st506:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof506
+ }
+ stCase506:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st300
+ case 93:
+ goto tr693
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st507
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st507:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof507
+ }
+ stCase507:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st301
+ case 93:
+ goto tr695
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st508
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st508:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof508
+ }
+ stCase508:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st302
+ case 93:
+ goto tr697
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st509
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st509:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof509
+ }
+ stCase509:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st303
+ case 93:
+ goto tr699
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st510
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st510:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof510
+ }
+ stCase510:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st304
+ case 93:
+ goto tr701
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st511
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st511:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof511
+ }
+ stCase511:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st305
+ case 93:
+ goto tr703
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st512
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st512:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof512
+ }
+ stCase512:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st306
+ case 93:
+ goto tr705
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st513
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st513:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof513
+ }
+ stCase513:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st307
+ case 93:
+ goto tr707
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st514
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st514:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof514
+ }
+ stCase514:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st308
+ case 93:
+ goto tr709
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st515
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st515:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof515
+ }
+ stCase515:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st309
+ case 93:
+ goto tr711
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st516
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st516:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof516
+ }
+ stCase516:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st310
+ case 93:
+ goto tr713
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st517
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st517:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof517
+ }
+ stCase517:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st311
+ case 93:
+ goto tr715
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st518
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st518:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof518
+ }
+ stCase518:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st312
+ case 93:
+ goto tr717
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st519
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st519:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof519
+ }
+ stCase519:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st313
+ case 93:
+ goto tr719
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st520
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st520:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof520
+ }
+ stCase520:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st314
+ case 93:
+ goto tr721
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st521
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st521:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof521
+ }
+ stCase521:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st315
+ case 93:
+ goto tr723
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st522
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st522:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof522
+ }
+ stCase522:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st316
+ case 93:
+ goto tr725
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st523
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st523:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof523
+ }
+ stCase523:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st317
+ case 93:
+ goto tr727
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st524
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st524:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof524
+ }
+ stCase524:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st318
+ case 93:
+ goto tr729
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st525
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st525:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof525
+ }
+ stCase525:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st319
+ case 93:
+ goto tr731
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st526
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st526:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof526
+ }
+ stCase526:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st320
+ case 93:
+ goto tr733
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st527
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st527:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof527
+ }
+ stCase527:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st321
+ case 93:
+ goto tr735
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st528
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st528:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof528
+ }
+ stCase528:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st322
+ case 93:
+ goto tr737
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st529
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st529:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof529
+ }
+ stCase529:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st323
+ case 93:
+ goto tr739
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st530
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st530:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof530
+ }
+ stCase530:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st324
+ case 93:
+ goto tr741
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st531
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st531:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof531
+ }
+ stCase531:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st325
+ case 93:
+ goto tr743
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st532
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st532:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof532
+ }
+ stCase532:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st326
+ case 93:
+ goto tr745
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st533
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st533:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof533
+ }
+ stCase533:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st327
+ case 93:
+ goto tr747
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st534
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st534:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof534
+ }
+ stCase534:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st328
+ case 93:
+ goto tr749
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st535
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st535:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof535
+ }
+ stCase535:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st329
+ case 93:
+ goto tr751
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st536
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st536:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof536
+ }
+ stCase536:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st330
+ case 93:
+ goto tr753
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st537
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st537:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof537
+ }
+ stCase537:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st331
+ case 93:
+ goto tr755
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st538
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st538:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof538
+ }
+ stCase538:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st332
+ case 93:
+ goto tr757
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st539
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st539:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof539
+ }
+ stCase539:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st333
+ case 93:
+ goto tr759
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st540
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st540:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof540
+ }
+ stCase540:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st334
+ case 93:
+ goto tr761
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st541
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st541:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof541
+ }
+ stCase541:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st335
+ case 93:
+ goto tr763
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st542
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st542:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof542
+ }
+ stCase542:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st336
+ case 93:
+ goto tr765
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st543
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st543:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof543
+ }
+ stCase543:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st337
+ case 93:
+ goto tr767
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st544
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st544:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof544
+ }
+ stCase544:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st338
+ case 93:
+ goto tr769
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st545
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st545:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof545
+ }
+ stCase545:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st339
+ case 93:
+ goto tr771
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st546
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st546:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof546
+ }
+ stCase546:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st340
+ case 93:
+ goto tr773
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st547
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st547:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof547
+ }
+ stCase547:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st341
+ case 93:
+ goto tr775
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st548
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st548:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof548
+ }
+ stCase548:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st342
+ case 93:
+ goto tr777
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st549
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st549:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof549
+ }
+ stCase549:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st343
+ case 93:
+ goto tr779
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st550
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st550:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof550
+ }
+ stCase550:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st344
+ case 93:
+ goto tr781
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st551
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st551:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof551
+ }
+ stCase551:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st345
+ case 93:
+ goto tr783
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st552
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st552:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof552
+ }
+ stCase552:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st346
+ case 93:
+ goto tr785
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st553
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st553:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof553
+ }
+ stCase553:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st347
+ case 93:
+ goto tr787
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st554
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st554:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof554
+ }
+ stCase554:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st348
+ case 93:
+ goto tr789
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st555
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st555:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof555
+ }
+ stCase555:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st349
+ case 93:
+ goto tr791
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st556
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st556:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof556
+ }
+ stCase556:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st350
+ case 93:
+ goto tr793
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st557
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st557:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof557
+ }
+ stCase557:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st351
+ case 93:
+ goto tr795
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st558
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st558:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof558
+ }
+ stCase558:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st352
+ case 93:
+ goto tr797
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st559
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st559:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof559
+ }
+ stCase559:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st353
+ case 93:
+ goto tr799
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st560
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st560:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof560
+ }
+ stCase560:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st354
+ case 93:
+ goto tr801
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st561
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st561:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof561
+ }
+ stCase561:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st355
+ case 93:
+ goto tr803
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st562
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st562:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof562
+ }
+ stCase562:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st356
+ case 93:
+ goto tr805
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st563
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st563:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof563
+ }
+ stCase563:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st357
+ case 93:
+ goto tr807
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st564
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st564:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof564
+ }
+ stCase564:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st358
+ case 93:
+ goto tr809
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st565
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st565:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof565
+ }
+ stCase565:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st359
+ case 93:
+ goto tr811
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st566
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st566:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof566
+ }
+ stCase566:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st360
+ case 93:
+ goto tr813
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st567
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st567:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof567
+ }
+ stCase567:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st361
+ case 93:
+ goto tr815
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st568
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st568:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof568
+ }
+ stCase568:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st362
+ case 93:
+ goto tr817
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st569
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st569:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof569
+ }
+ stCase569:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st363
+ case 93:
+ goto tr819
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st570
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st570:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof570
+ }
+ stCase570:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st364
+ case 93:
+ goto tr821
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st571
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st571:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof571
+ }
+ stCase571:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st365
+ case 93:
+ goto tr823
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st572
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st572:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof572
+ }
+ stCase572:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st366
+ case 93:
+ goto tr825
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st573
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st573:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof573
+ }
+ stCase573:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st367
+ case 93:
+ goto tr827
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st574
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st574:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof574
+ }
+ stCase574:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st368
+ case 93:
+ goto tr829
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st575
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st575:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof575
+ }
+ stCase575:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st369
+ case 93:
+ goto tr831
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st576
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st576:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof576
+ }
+ stCase576:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st370
+ case 93:
+ goto tr833
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st577
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st577:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof577
+ }
+ stCase577:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st371
+ case 93:
+ goto tr835
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st578
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st578:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof578
+ }
+ stCase578:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st372
+ case 93:
+ goto tr837
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st579
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st579:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof579
+ }
+ stCase579:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st373
+ case 93:
+ goto tr839
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st580
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st580:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof580
+ }
+ stCase580:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st374
+ case 93:
+ goto tr841
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st581
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st581:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof581
+ }
+ stCase581:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st375
+ case 93:
+ goto tr843
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st582
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st582:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof582
+ }
+ stCase582:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st376
+ case 93:
+ goto tr845
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st583
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st583:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof583
+ }
+ stCase583:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st377
+ case 93:
+ goto tr847
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st584
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st584:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof584
+ }
+ stCase584:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st378
+ case 93:
+ goto tr849
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st585
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st585:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof585
+ }
+ stCase585:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st379
+ case 93:
+ goto tr851
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st586
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st586:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof586
+ }
+ stCase586:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st380
+ case 93:
+ goto tr853
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st587
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st587:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof587
+ }
+ stCase587:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st381
+ case 93:
+ goto tr855
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st588
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st588:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof588
+ }
+ stCase588:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st382
+ case 93:
+ goto tr857
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st589
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st589:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof589
+ }
+ stCase589:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st383
+ case 93:
+ goto tr859
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st590
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st590:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof590
+ }
+ stCase590:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st384
+ case 93:
+ goto tr861
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st591
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st591:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof591
+ }
+ stCase591:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 93:
+ goto tr147
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr449
+ }
+ case (m.data)[(m.p)] > 90:
+ if 92 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st129
+ }
+ default:
+ goto st129
+ }
+ goto st78
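+	// Transition actions trNNN below fire on ']' in the states above: they
+	// copy the text matched so far into output.content and jump to the
+	// corresponding follow-up state.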
+ tr861:
+
+ output.content = string(m.text())
+
+ goto st592
+ st592:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof592
+ }
+ stCase592:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ goto st78
+ tr859:
+
+ output.content = string(m.text())
+
+ goto st593
+ st593:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof593
+ }
+ stCase593:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st384
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr857:
+
+ output.content = string(m.text())
+
+ goto st594
+ st594:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof594
+ }
+ stCase594:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st383
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr855:
+
+ output.content = string(m.text())
+
+ goto st595
+ st595:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof595
+ }
+ stCase595:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st382
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr853:
+
+ output.content = string(m.text())
+
+ goto st596
+ st596:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof596
+ }
+ stCase596:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st381
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr851:
+
+ output.content = string(m.text())
+
+ goto st597
+ st597:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof597
+ }
+ stCase597:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st380
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr849:
+
+ output.content = string(m.text())
+
+ goto st598
+ st598:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof598
+ }
+ stCase598:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st379
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr847:
+
+ output.content = string(m.text())
+
+ goto st599
+ st599:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof599
+ }
+ stCase599:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st378
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr845:
+
+ output.content = string(m.text())
+
+ goto st600
+ st600:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof600
+ }
+ stCase600:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st377
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr843:
+
+ output.content = string(m.text())
+
+ goto st601
+ st601:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof601
+ }
+ stCase601:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st376
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr841:
+
+ output.content = string(m.text())
+
+ goto st602
+ st602:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof602
+ }
+ stCase602:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st375
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr839:
+
+ output.content = string(m.text())
+
+ goto st603
+ st603:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof603
+ }
+ stCase603:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st374
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr837:
+
+ output.content = string(m.text())
+
+ goto st604
+ st604:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof604
+ }
+ stCase604:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st373
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr835:
+
+ output.content = string(m.text())
+
+ goto st605
+ st605:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof605
+ }
+ stCase605:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st372
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr833:
+
+ output.content = string(m.text())
+
+ goto st606
+ st606:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof606
+ }
+ stCase606:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st371
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr831:
+
+ output.content = string(m.text())
+
+ goto st607
+ st607:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof607
+ }
+ stCase607:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st370
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr829:
+
+ output.content = string(m.text())
+
+ goto st608
+ st608:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof608
+ }
+ stCase608:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st369
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr827:
+
+ output.content = string(m.text())
+
+ goto st609
+ st609:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof609
+ }
+ stCase609:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st368
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr825:
+
+ output.content = string(m.text())
+
+ goto st610
+ st610:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof610
+ }
+ stCase610:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st367
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr823:
+
+ output.content = string(m.text())
+
+ goto st611
+ st611:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof611
+ }
+ stCase611:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st366
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr821:
+
+ output.content = string(m.text())
+
+ goto st612
+ st612:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof612
+ }
+ stCase612:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st365
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr819:
+
+ output.content = string(m.text())
+
+ goto st613
+ st613:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof613
+ }
+ stCase613:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st364
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr817:
+
+ output.content = string(m.text())
+
+ goto st614
+ st614:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof614
+ }
+ stCase614:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st363
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr815:
+
+ output.content = string(m.text())
+
+ goto st615
+ st615:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof615
+ }
+ stCase615:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st362
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr813:
+
+ output.content = string(m.text())
+
+ goto st616
+ st616:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof616
+ }
+ stCase616:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st361
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr811:
+
+ output.content = string(m.text())
+
+ goto st617
+ st617:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof617
+ }
+ stCase617:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st360
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr809:
+
+ output.content = string(m.text())
+
+ goto st618
+ st618:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof618
+ }
+ stCase618:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st359
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr807:
+
+ output.content = string(m.text())
+
+ goto st619
+ st619:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof619
+ }
+ stCase619:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st358
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr805:
+
+ output.content = string(m.text())
+
+ goto st620
+ st620:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof620
+ }
+ stCase620:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st357
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr803:
+
+ output.content = string(m.text())
+
+ goto st621
+ st621:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof621
+ }
+ stCase621:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st356
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr801:
+
+ output.content = string(m.text())
+
+ goto st622
+ st622:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof622
+ }
+ stCase622:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st355
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr799:
+
+ output.content = string(m.text())
+
+ goto st623
+ st623:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof623
+ }
+ stCase623:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st354
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr797:
+
+ output.content = string(m.text())
+
+ goto st624
+ st624:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof624
+ }
+ stCase624:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st353
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr795:
+
+ output.content = string(m.text())
+
+ goto st625
+ st625:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof625
+ }
+ stCase625:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st352
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr793:
+
+ output.content = string(m.text())
+
+ goto st626
+ st626:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof626
+ }
+ stCase626:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st351
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr791:
+
+ output.content = string(m.text())
+
+ goto st627
+ st627:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof627
+ }
+ stCase627:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st350
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr789:
+
+ output.content = string(m.text())
+
+ goto st628
+ st628:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof628
+ }
+ stCase628:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st349
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr787:
+
+ output.content = string(m.text())
+
+ goto st629
+ st629:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof629
+ }
+ stCase629:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st348
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr785:
+
+ output.content = string(m.text())
+
+ goto st630
+ st630:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof630
+ }
+ stCase630:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st347
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr783:
+
+ output.content = string(m.text())
+
+ goto st631
+ st631:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof631
+ }
+ stCase631:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st346
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr781:
+
+ output.content = string(m.text())
+
+ goto st632
+ st632:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof632
+ }
+ stCase632:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st345
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr779:
+
+ output.content = string(m.text())
+
+ goto st633
+ st633:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof633
+ }
+ stCase633:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st344
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr777:
+
+ output.content = string(m.text())
+
+ goto st634
+ st634:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof634
+ }
+ stCase634:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st343
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr775:
+
+ output.content = string(m.text())
+
+ goto st635
+ st635:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof635
+ }
+ stCase635:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st342
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr773:
+
+ output.content = string(m.text())
+
+ goto st636
+ st636:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof636
+ }
+ stCase636:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st341
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr771:
+
+ output.content = string(m.text())
+
+ goto st637
+ st637:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof637
+ }
+ stCase637:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st340
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr769:
+
+ output.content = string(m.text())
+
+ goto st638
+ st638:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof638
+ }
+ stCase638:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st339
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr767:
+
+ output.content = string(m.text())
+
+ goto st639
+ st639:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof639
+ }
+ stCase639:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st338
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr765:
+
+ output.content = string(m.text())
+
+ goto st640
+ st640:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof640
+ }
+ stCase640:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st337
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr763:
+
+ output.content = string(m.text())
+
+ goto st641
+ st641:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof641
+ }
+ stCase641:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st336
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr761:
+
+ output.content = string(m.text())
+
+ goto st642
+ st642:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof642
+ }
+ stCase642:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st335
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr759:
+
+ output.content = string(m.text())
+
+ goto st643
+ st643:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof643
+ }
+ stCase643:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st334
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr757:
+
+ output.content = string(m.text())
+
+ goto st644
+ st644:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof644
+ }
+ stCase644:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st333
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr755:
+
+ output.content = string(m.text())
+
+ goto st645
+ st645:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof645
+ }
+ stCase645:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st332
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr753:
+
+ output.content = string(m.text())
+
+ goto st646
+ st646:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof646
+ }
+ stCase646:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st331
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr751:
+
+ output.content = string(m.text())
+
+ goto st647
+ st647:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof647
+ }
+ stCase647:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st330
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr749:
+
+ output.content = string(m.text())
+
+ goto st648
+ st648:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof648
+ }
+ stCase648:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st329
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr747:
+
+ output.content = string(m.text())
+
+ goto st649
+ st649:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof649
+ }
+ stCase649:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st328
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr745:
+
+ output.content = string(m.text())
+
+ goto st650
+ st650:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof650
+ }
+ stCase650:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st327
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr743:
+
+ output.content = string(m.text())
+
+ goto st651
+ st651:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof651
+ }
+ stCase651:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st326
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr741:
+
+ output.content = string(m.text())
+
+ goto st652
+ st652:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof652
+ }
+ stCase652:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st325
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr739:
+
+ output.content = string(m.text())
+
+ goto st653
+ st653:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof653
+ }
+ stCase653:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st324
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr737:
+
+ output.content = string(m.text())
+
+ goto st654
+ st654:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof654
+ }
+ stCase654:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st323
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr735:
+
+ output.content = string(m.text())
+
+ goto st655
+ st655:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof655
+ }
+ stCase655:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st322
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr733:
+
+ output.content = string(m.text())
+
+ goto st656
+ st656:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof656
+ }
+ stCase656:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st321
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr731:
+
+ output.content = string(m.text())
+
+ goto st657
+ st657:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof657
+ }
+ stCase657:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st320
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr729:
+
+ output.content = string(m.text())
+
+ goto st658
+ st658:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof658
+ }
+ stCase658:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st319
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr727:
+
+ output.content = string(m.text())
+
+ goto st659
+ st659:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof659
+ }
+ stCase659:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st318
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr725:
+
+ output.content = string(m.text())
+
+ goto st660
+ st660:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof660
+ }
+ stCase660:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st317
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr723:
+
+ output.content = string(m.text())
+
+ goto st661
+ st661:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof661
+ }
+ stCase661:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st316
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr721:
+
+ output.content = string(m.text())
+
+ goto st662
+ st662:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof662
+ }
+ stCase662:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st315
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr719:
+
+ output.content = string(m.text())
+
+ goto st663
+ st663:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof663
+ }
+ stCase663:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st314
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr717:
+
+ output.content = string(m.text())
+
+ goto st664
+ st664:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof664
+ }
+ stCase664:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st313
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr715:
+
+ output.content = string(m.text())
+
+ goto st665
+ st665:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof665
+ }
+ stCase665:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st312
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr713:
+
+ output.content = string(m.text())
+
+ goto st666
+ st666:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof666
+ }
+ stCase666:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st311
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr711:
+
+ output.content = string(m.text())
+
+ goto st667
+ st667:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof667
+ }
+ stCase667:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st310
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr709:
+
+ output.content = string(m.text())
+
+ goto st668
+ st668:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof668
+ }
+ stCase668:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st309
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr707:
+
+ output.content = string(m.text())
+
+ goto st669
+ st669:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof669
+ }
+ stCase669:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st308
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr705:
+
+ output.content = string(m.text())
+
+ goto st670
+ st670:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof670
+ }
+ stCase670:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st307
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr703:
+
+ output.content = string(m.text())
+
+ goto st671
+ st671:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof671
+ }
+ stCase671:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st306
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr701:
+
+ output.content = string(m.text())
+
+ goto st672
+ st672:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof672
+ }
+ stCase672:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st305
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr699:
+
+ output.content = string(m.text())
+
+ goto st673
+ st673:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof673
+ }
+ stCase673:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st304
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr697:
+
+ output.content = string(m.text())
+
+ goto st674
+ st674:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof674
+ }
+ stCase674:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st303
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr695:
+
+ output.content = string(m.text())
+
+ goto st675
+ st675:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof675
+ }
+ stCase675:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st302
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr693:
+
+ output.content = string(m.text())
+
+ goto st676
+ st676:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof676
+ }
+ stCase676:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st301
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr691:
+
+ output.content = string(m.text())
+
+ goto st677
+ st677:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof677
+ }
+ stCase677:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st300
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr689:
+
+ output.content = string(m.text())
+
+ goto st678
+ st678:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof678
+ }
+ stCase678:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st299
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr687:
+
+ output.content = string(m.text())
+
+ goto st679
+ st679:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof679
+ }
+ stCase679:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st298
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr685:
+
+ output.content = string(m.text())
+
+ goto st680
+ st680:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof680
+ }
+ stCase680:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st297
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr683:
+
+ output.content = string(m.text())
+
+ goto st681
+ st681:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof681
+ }
+ stCase681:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st296
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr681:
+
+ output.content = string(m.text())
+
+ goto st682
+ st682:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof682
+ }
+ stCase682:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st295
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr679:
+
+ output.content = string(m.text())
+
+ goto st683
+ st683:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof683
+ }
+ stCase683:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st294
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr677:
+
+ output.content = string(m.text())
+
+ goto st684
+ st684:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof684
+ }
+ stCase684:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st293
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr675:
+
+ output.content = string(m.text())
+
+ goto st685
+ st685:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof685
+ }
+ stCase685:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st292
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr673:
+
+ output.content = string(m.text())
+
+ goto st686
+ st686:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof686
+ }
+ stCase686:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st291
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr671:
+
+ output.content = string(m.text())
+
+ goto st687
+ st687:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof687
+ }
+ stCase687:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st290
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr669:
+
+ output.content = string(m.text())
+
+ goto st688
+ st688:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof688
+ }
+ stCase688:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st289
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr667:
+
+ output.content = string(m.text())
+
+ goto st689
+ st689:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof689
+ }
+ stCase689:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st288
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr665:
+
+ output.content = string(m.text())
+
+ goto st690
+ st690:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof690
+ }
+ stCase690:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st287
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr663:
+
+ output.content = string(m.text())
+
+ goto st691
+ st691:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof691
+ }
+ stCase691:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st286
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr661:
+
+ output.content = string(m.text())
+
+ goto st692
+ st692:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof692
+ }
+ stCase692:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st285
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr659:
+
+ output.content = string(m.text())
+
+ goto st693
+ st693:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof693
+ }
+ stCase693:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st284
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr657:
+
+ output.content = string(m.text())
+
+ goto st694
+ st694:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof694
+ }
+ stCase694:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st283
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr655:
+
+ output.content = string(m.text())
+
+ goto st695
+ st695:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof695
+ }
+ stCase695:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st282
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr653:
+
+ output.content = string(m.text())
+
+ goto st696
+ st696:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof696
+ }
+ stCase696:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st281
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr651:
+
+ output.content = string(m.text())
+
+ goto st697
+ st697:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof697
+ }
+ stCase697:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st280
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr649:
+
+ output.content = string(m.text())
+
+ goto st698
+ st698:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof698
+ }
+ stCase698:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st279
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr647:
+
+ output.content = string(m.text())
+
+ goto st699
+ st699:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof699
+ }
+ stCase699:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st278
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr645:
+
+ output.content = string(m.text())
+
+ goto st700
+ st700:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof700
+ }
+ stCase700:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st277
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr643:
+
+ output.content = string(m.text())
+
+ goto st701
+ st701:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof701
+ }
+ stCase701:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st276
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr641:
+
+ output.content = string(m.text())
+
+ goto st702
+ st702:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof702
+ }
+ stCase702:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st275
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr639:
+
+ output.content = string(m.text())
+
+ goto st703
+ st703:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof703
+ }
+ stCase703:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st274
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr637:
+
+ output.content = string(m.text())
+
+ goto st704
+ st704:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof704
+ }
+ stCase704:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st273
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr635:
+
+ output.content = string(m.text())
+
+ goto st705
+ st705:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof705
+ }
+ stCase705:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st272
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr633:
+
+ output.content = string(m.text())
+
+ goto st706
+ st706:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof706
+ }
+ stCase706:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st271
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr631:
+
+ output.content = string(m.text())
+
+ goto st707
+ st707:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof707
+ }
+ stCase707:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st270
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr629:
+
+ output.content = string(m.text())
+
+ goto st708
+ st708:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof708
+ }
+ stCase708:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st269
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr627:
+
+ output.content = string(m.text())
+
+ goto st709
+ st709:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof709
+ }
+ stCase709:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st268
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr625:
+
+ output.content = string(m.text())
+
+ goto st710
+ st710:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof710
+ }
+ stCase710:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st267
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr623:
+
+ output.content = string(m.text())
+
+ goto st711
+ st711:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof711
+ }
+ stCase711:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st266
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr621:
+
+ output.content = string(m.text())
+
+ goto st712
+ st712:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof712
+ }
+ stCase712:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st265
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr619:
+
+ output.content = string(m.text())
+
+ goto st713
+ st713:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof713
+ }
+ stCase713:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st264
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr617:
+
+ output.content = string(m.text())
+
+ goto st714
+ st714:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof714
+ }
+ stCase714:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st263
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr615:
+
+ output.content = string(m.text())
+
+ goto st715
+ st715:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof715
+ }
+ stCase715:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st262
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr613:
+
+ output.content = string(m.text())
+
+ goto st716
+ st716:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof716
+ }
+ stCase716:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st261
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr611:
+
+ output.content = string(m.text())
+
+ goto st717
+ st717:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof717
+ }
+ stCase717:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st260
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr609:
+
+ output.content = string(m.text())
+
+ goto st718
+ st718:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof718
+ }
+ stCase718:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st259
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr607:
+
+ output.content = string(m.text())
+
+ goto st719
+ st719:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof719
+ }
+ stCase719:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st258
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr605:
+
+ output.content = string(m.text())
+
+ goto st720
+ st720:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof720
+ }
+ stCase720:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st257
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr603:
+
+ output.content = string(m.text())
+
+ goto st721
+ st721:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof721
+ }
+ stCase721:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st256
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr601:
+
+ output.content = string(m.text())
+
+ goto st722
+ st722:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof722
+ }
+ stCase722:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st255
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr599:
+
+ output.content = string(m.text())
+
+ goto st723
+ st723:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof723
+ }
+ stCase723:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st254
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr597:
+
+ output.content = string(m.text())
+
+ goto st724
+ st724:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof724
+ }
+ stCase724:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st253
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr595:
+
+ output.content = string(m.text())
+
+ goto st725
+ st725:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof725
+ }
+ stCase725:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st252
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr593:
+
+ output.content = string(m.text())
+
+ goto st726
+ st726:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof726
+ }
+ stCase726:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st251
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr591:
+
+ output.content = string(m.text())
+
+ goto st727
+ st727:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof727
+ }
+ stCase727:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st250
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr589:
+
+ output.content = string(m.text())
+
+ goto st728
+ st728:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof728
+ }
+ stCase728:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st249
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr587:
+
+ output.content = string(m.text())
+
+ goto st729
+ st729:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof729
+ }
+ stCase729:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st248
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr585:
+
+ output.content = string(m.text())
+
+ goto st730
+ st730:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof730
+ }
+ stCase730:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st247
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr583:
+
+ output.content = string(m.text())
+
+ goto st731
+ st731:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof731
+ }
+ stCase731:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st246
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr581:
+
+ output.content = string(m.text())
+
+ goto st732
+ st732:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof732
+ }
+ stCase732:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st245
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr579:
+
+ output.content = string(m.text())
+
+ goto st733
+ st733:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof733
+ }
+ stCase733:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st244
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr577:
+
+ output.content = string(m.text())
+
+ goto st734
+ st734:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof734
+ }
+ stCase734:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st243
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr575:
+
+ output.content = string(m.text())
+
+ goto st735
+ st735:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof735
+ }
+ stCase735:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st242
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr573:
+
+ output.content = string(m.text())
+
+ goto st736
+ st736:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof736
+ }
+ stCase736:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st241
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr571:
+
+ output.content = string(m.text())
+
+ goto st737
+ st737:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof737
+ }
+ stCase737:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st240
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr569:
+
+ output.content = string(m.text())
+
+ goto st738
+ st738:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof738
+ }
+ stCase738:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st239
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr567:
+
+ output.content = string(m.text())
+
+ goto st739
+ st739:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof739
+ }
+ stCase739:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st238
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr565:
+
+ output.content = string(m.text())
+
+ goto st740
+ st740:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof740
+ }
+ stCase740:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st237
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr563:
+
+ output.content = string(m.text())
+
+ goto st741
+ st741:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof741
+ }
+ stCase741:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st236
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr561:
+
+ output.content = string(m.text())
+
+ goto st742
+ st742:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof742
+ }
+ stCase742:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st235
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr559:
+
+ output.content = string(m.text())
+
+ goto st743
+ st743:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof743
+ }
+ stCase743:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st234
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr557:
+
+ output.content = string(m.text())
+
+ goto st744
+ st744:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof744
+ }
+ stCase744:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st233
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr555:
+
+ output.content = string(m.text())
+
+ goto st745
+ st745:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof745
+ }
+ stCase745:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st232
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr553:
+
+ output.content = string(m.text())
+
+ goto st746
+ st746:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof746
+ }
+ stCase746:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st231
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr551:
+
+ output.content = string(m.text())
+
+ goto st747
+ st747:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof747
+ }
+ stCase747:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st230
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr549:
+
+ output.content = string(m.text())
+
+ goto st748
+ st748:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof748
+ }
+ stCase748:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st229
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr547:
+
+ output.content = string(m.text())
+
+ goto st749
+ st749:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof749
+ }
+ stCase749:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st228
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr545:
+
+ output.content = string(m.text())
+
+ goto st750
+ st750:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof750
+ }
+ stCase750:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st227
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr543:
+
+ output.content = string(m.text())
+
+ goto st751
+ st751:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof751
+ }
+ stCase751:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st226
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr541:
+
+ output.content = string(m.text())
+
+ goto st752
+ st752:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof752
+ }
+ stCase752:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st225
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr539:
+
+ output.content = string(m.text())
+
+ goto st753
+ st753:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof753
+ }
+ stCase753:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st224
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr537:
+
+ output.content = string(m.text())
+
+ goto st754
+ st754:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof754
+ }
+ stCase754:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st223
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr535:
+
+ output.content = string(m.text())
+
+ goto st755
+ st755:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof755
+ }
+ stCase755:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st222
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr533:
+
+ output.content = string(m.text())
+
+ goto st756
+ st756:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof756
+ }
+ stCase756:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st221
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr531:
+
+ output.content = string(m.text())
+
+ goto st757
+ st757:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof757
+ }
+ stCase757:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st220
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr529:
+
+ output.content = string(m.text())
+
+ goto st758
+ st758:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof758
+ }
+ stCase758:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st219
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr527:
+
+ output.content = string(m.text())
+
+ goto st759
+ st759:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof759
+ }
+ stCase759:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st218
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr525:
+
+ output.content = string(m.text())
+
+ goto st760
+ st760:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof760
+ }
+ stCase760:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st217
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr523:
+
+ output.content = string(m.text())
+
+ goto st761
+ st761:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof761
+ }
+ stCase761:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st216
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr521:
+
+ output.content = string(m.text())
+
+ goto st762
+ st762:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof762
+ }
+ stCase762:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st215
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr519:
+
+ output.content = string(m.text())
+
+ goto st763
+ st763:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof763
+ }
+ stCase763:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st214
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr517:
+
+ output.content = string(m.text())
+
+ goto st764
+ st764:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof764
+ }
+ stCase764:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st213
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr515:
+
+ output.content = string(m.text())
+
+ goto st765
+ st765:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof765
+ }
+ stCase765:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st212
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr513:
+
+ output.content = string(m.text())
+
+ goto st766
+ st766:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof766
+ }
+ stCase766:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st211
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr511:
+
+ output.content = string(m.text())
+
+ goto st767
+ st767:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof767
+ }
+ stCase767:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st210
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr509:
+
+ output.content = string(m.text())
+
+ goto st768
+ st768:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof768
+ }
+ stCase768:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st209
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr507:
+
+ output.content = string(m.text())
+
+ goto st769
+ st769:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof769
+ }
+ stCase769:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st208
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr505:
+
+ output.content = string(m.text())
+
+ goto st770
+ st770:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof770
+ }
+ stCase770:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st207
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr503:
+
+ output.content = string(m.text())
+
+ goto st771
+ st771:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof771
+ }
+ stCase771:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st206
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr501:
+
+ output.content = string(m.text())
+
+ goto st772
+ st772:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof772
+ }
+ stCase772:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st205
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr499:
+
+ output.content = string(m.text())
+
+ goto st773
+ st773:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof773
+ }
+ stCase773:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st204
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr497:
+
+ output.content = string(m.text())
+
+ goto st774
+ st774:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof774
+ }
+ stCase774:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st203
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr495:
+
+ output.content = string(m.text())
+
+ goto st775
+ st775:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof775
+ }
+ stCase775:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st202
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr493:
+
+ output.content = string(m.text())
+
+ goto st776
+ st776:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof776
+ }
+ stCase776:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st201
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr491:
+
+ output.content = string(m.text())
+
+ goto st777
+ st777:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof777
+ }
+ stCase777:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st200
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr489:
+
+ output.content = string(m.text())
+
+ goto st778
+ st778:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof778
+ }
+ stCase778:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st199
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr487:
+
+ output.content = string(m.text())
+
+ goto st779
+ st779:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof779
+ }
+ stCase779:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st198
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr485:
+
+ output.content = string(m.text())
+
+ goto st780
+ st780:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof780
+ }
+ stCase780:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st197
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr483:
+
+ output.content = string(m.text())
+
+ goto st781
+ st781:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof781
+ }
+ stCase781:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st196
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr481:
+
+ output.content = string(m.text())
+
+ goto st782
+ st782:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof782
+ }
+ stCase782:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st195
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr479:
+
+ output.content = string(m.text())
+
+ goto st783
+ st783:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof783
+ }
+ stCase783:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st194
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr477:
+
+ output.content = string(m.text())
+
+ goto st784
+ st784:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof784
+ }
+ stCase784:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st193
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr475:
+
+ output.content = string(m.text())
+
+ goto st785
+ st785:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof785
+ }
+ stCase785:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st192
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr473:
+
+ output.content = string(m.text())
+
+ goto st786
+ st786:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof786
+ }
+ stCase786:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st191
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr471:
+
+ output.content = string(m.text())
+
+ goto st787
+ st787:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof787
+ }
+ stCase787:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st190
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr469:
+
+ output.content = string(m.text())
+
+ goto st788
+ st788:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof788
+ }
+ stCase788:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st189
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr467:
+
+ output.content = string(m.text())
+
+ goto st789
+ st789:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof789
+ }
+ stCase789:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st188
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr465:
+
+ output.content = string(m.text())
+
+ goto st790
+ st790:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof790
+ }
+ stCase790:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st187
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr463:
+
+ output.content = string(m.text())
+
+ goto st791
+ st791:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof791
+ }
+ stCase791:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st186
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr461:
+
+ output.content = string(m.text())
+
+ goto st792
+ st792:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof792
+ }
+ stCase792:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st185
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr459:
+
+ output.content = string(m.text())
+
+ goto st793
+ st793:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof793
+ }
+ stCase793:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st184
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr457:
+
+ output.content = string(m.text())
+
+ goto st794
+ st794:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof794
+ }
+ stCase794:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st183
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr455:
+
+ output.content = string(m.text())
+
+ goto st795
+ st795:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof795
+ }
+ stCase795:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st182
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr453:
+
+ output.content = string(m.text())
+
+ goto st796
+ st796:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof796
+ }
+ stCase796:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st181
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr451:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st797
+ tr865:
+
+ output.content = string(m.text())
+
+ goto st797
+ st797:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof797
+ }
+ stCase797:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st180
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr240:
+
+ output.tag = string(m.text())
+
+ goto st798
+ st798:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof798
+ }
+ stCase798:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st178
+ case 93:
+ goto tr863
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr862
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr862:
+
+ m.pb = m.p
+
+ goto st799
+ st799:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof799
+ }
+ stCase799:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st179
+ case 93:
+ goto tr865
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st386
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr863:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st800
+ tr870:
+
+ output.content = string(m.text())
+
+ goto st800
+ st800:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof800
+ }
+ stCase800:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st179
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr238:
+
+ output.tag = string(m.text())
+
+ goto st801
+ st801:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof801
+ }
+ stCase801:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st803
+ case 93:
+ goto tr868
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr866
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr866:
+
+ m.pb = m.p
+
+ goto st802
+ st802:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof802
+ }
+ stCase802:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st178
+ case 93:
+ goto tr870
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st799
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st803:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof803
+ }
+ stCase803:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st178
+ }
+ default:
+ goto st178
+ }
+ goto st78
+ tr868:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st804
+ tr875:
+
+ output.content = string(m.text())
+
+ goto st804
+ st804:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof804
+ }
+ stCase804:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st178
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr236:
+
+ output.tag = string(m.text())
+
+ goto st805
+ st805:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof805
+ }
+ stCase805:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st807
+ case 93:
+ goto tr873
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr871
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr871:
+
+ m.pb = m.p
+
+ goto st806
+ st806:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof806
+ }
+ stCase806:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st803
+ case 93:
+ goto tr875
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st802
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st807:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof807
+ }
+ stCase807:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st803
+ }
+ default:
+ goto st803
+ }
+ goto st78
+ tr873:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st808
+ tr880:
+
+ output.content = string(m.text())
+
+ goto st808
+ st808:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof808
+ }
+ stCase808:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st803
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr234:
+
+ output.tag = string(m.text())
+
+ goto st809
+ st809:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof809
+ }
+ stCase809:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st811
+ case 93:
+ goto tr878
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr876
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr876:
+
+ m.pb = m.p
+
+ goto st810
+ st810:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof810
+ }
+ stCase810:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st807
+ case 93:
+ goto tr880
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st806
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st811:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof811
+ }
+ stCase811:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st807
+ }
+ default:
+ goto st807
+ }
+ goto st78
+ tr878:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st812
+ tr885:
+
+ output.content = string(m.text())
+
+ goto st812
+ st812:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof812
+ }
+ stCase812:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st807
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr232:
+
+ output.tag = string(m.text())
+
+ goto st813
+ st813:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof813
+ }
+ stCase813:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st815
+ case 93:
+ goto tr883
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr881
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr881:
+
+ m.pb = m.p
+
+ goto st814
+ st814:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof814
+ }
+ stCase814:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st811
+ case 93:
+ goto tr885
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st810
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st815:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof815
+ }
+ stCase815:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st811
+ }
+ default:
+ goto st811
+ }
+ goto st78
+ tr883:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st816
+ tr890:
+
+ output.content = string(m.text())
+
+ goto st816
+ st816:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof816
+ }
+ stCase816:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st811
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr230:
+
+ output.tag = string(m.text())
+
+ goto st817
+ st817:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof817
+ }
+ stCase817:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st819
+ case 93:
+ goto tr888
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr886
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr886:
+
+ m.pb = m.p
+
+ goto st818
+ st818:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof818
+ }
+ stCase818:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st815
+ case 93:
+ goto tr890
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st814
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st819:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof819
+ }
+ stCase819:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st815
+ }
+ default:
+ goto st815
+ }
+ goto st78
+ tr888:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st820
+ tr895:
+
+ output.content = string(m.text())
+
+ goto st820
+ st820:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof820
+ }
+ stCase820:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st815
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr228:
+
+ output.tag = string(m.text())
+
+ goto st821
+ st821:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof821
+ }
+ stCase821:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st823
+ case 93:
+ goto tr893
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr891
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr891:
+
+ m.pb = m.p
+
+ goto st822
+ st822:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof822
+ }
+ stCase822:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st819
+ case 93:
+ goto tr895
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st818
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st823:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof823
+ }
+ stCase823:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st819
+ }
+ default:
+ goto st819
+ }
+ goto st78
+ tr893:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st824
+ tr900:
+
+ output.content = string(m.text())
+
+ goto st824
+ st824:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof824
+ }
+ stCase824:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st819
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr226:
+
+ output.tag = string(m.text())
+
+ goto st825
+ st825:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof825
+ }
+ stCase825:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st827
+ case 93:
+ goto tr898
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr896
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr896:
+
+ m.pb = m.p
+
+ goto st826
+ st826:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof826
+ }
+ stCase826:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st823
+ case 93:
+ goto tr900
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st822
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st827:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof827
+ }
+ stCase827:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st823
+ }
+ default:
+ goto st823
+ }
+ goto st78
+ tr898:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st828
+ tr905:
+
+ output.content = string(m.text())
+
+ goto st828
+ st828:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof828
+ }
+ stCase828:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st823
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr224:
+
+ output.tag = string(m.text())
+
+ goto st829
+ st829:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof829
+ }
+ stCase829:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st831
+ case 93:
+ goto tr903
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr901
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr901:
+
+ m.pb = m.p
+
+ goto st830
+ st830:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof830
+ }
+ stCase830:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st827
+ case 93:
+ goto tr905
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st826
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st831:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof831
+ }
+ stCase831:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st827
+ }
+ default:
+ goto st827
+ }
+ goto st78
+ tr903:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st832
+ tr910:
+
+ output.content = string(m.text())
+
+ goto st832
+ st832:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof832
+ }
+ stCase832:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st827
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr222:
+
+ output.tag = string(m.text())
+
+ goto st833
+ st833:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof833
+ }
+ stCase833:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st835
+ case 93:
+ goto tr908
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr906
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr906:
+
+ m.pb = m.p
+
+ goto st834
+ st834:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof834
+ }
+ stCase834:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st831
+ case 93:
+ goto tr910
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st830
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st835:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof835
+ }
+ stCase835:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st831
+ }
+ default:
+ goto st831
+ }
+ goto st78
+ tr908:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st836
+ tr915:
+
+ output.content = string(m.text())
+
+ goto st836
+ st836:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof836
+ }
+ stCase836:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st831
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr220:
+
+ output.tag = string(m.text())
+
+ goto st837
+ st837:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof837
+ }
+ stCase837:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st839
+ case 93:
+ goto tr913
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr911
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr911:
+
+ m.pb = m.p
+
+ goto st838
+ st838:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof838
+ }
+ stCase838:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st835
+ case 93:
+ goto tr915
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st834
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st839:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof839
+ }
+ stCase839:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st835
+ }
+ default:
+ goto st835
+ }
+ goto st78
+ tr913:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st840
+ tr920:
+
+ output.content = string(m.text())
+
+ goto st840
+ st840:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof840
+ }
+ stCase840:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st835
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr218:
+
+ output.tag = string(m.text())
+
+ goto st841
+ st841:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof841
+ }
+ stCase841:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st843
+ case 93:
+ goto tr918
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr916
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr916:
+
+ m.pb = m.p
+
+ goto st842
+ st842:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof842
+ }
+ stCase842:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st839
+ case 93:
+ goto tr920
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st838
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st843:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof843
+ }
+ stCase843:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st839
+ }
+ default:
+ goto st839
+ }
+ goto st78
+ tr918:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st844
+ tr925:
+
+ output.content = string(m.text())
+
+ goto st844
+ st844:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof844
+ }
+ stCase844:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st839
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr216:
+
+ output.tag = string(m.text())
+
+ goto st845
+ st845:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof845
+ }
+ stCase845:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st847
+ case 93:
+ goto tr923
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr921
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr921:
+
+ m.pb = m.p
+
+ goto st846
+ st846:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof846
+ }
+ stCase846:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st843
+ case 93:
+ goto tr925
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st842
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st847:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof847
+ }
+ stCase847:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st843
+ }
+ default:
+ goto st843
+ }
+ goto st78
+ tr923:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st848
+ tr930:
+
+ output.content = string(m.text())
+
+ goto st848
+ st848:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof848
+ }
+ stCase848:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st843
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr214:
+
+ output.tag = string(m.text())
+
+ goto st849
+ st849:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof849
+ }
+ stCase849:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st851
+ case 93:
+ goto tr928
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr926
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr926:
+
+ m.pb = m.p
+
+ goto st850
+ st850:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof850
+ }
+ stCase850:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st847
+ case 93:
+ goto tr930
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st846
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st851:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof851
+ }
+ stCase851:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st847
+ }
+ default:
+ goto st847
+ }
+ goto st78
+ tr928:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st852
+ tr935:
+
+ output.content = string(m.text())
+
+ goto st852
+ st852:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof852
+ }
+ stCase852:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st847
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr212:
+
+ output.tag = string(m.text())
+
+ goto st853
+ st853:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof853
+ }
+ stCase853:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st855
+ case 93:
+ goto tr933
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr931
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr931:
+
+ m.pb = m.p
+
+ goto st854
+ st854:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof854
+ }
+ stCase854:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st851
+ case 93:
+ goto tr935
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st850
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st855:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof855
+ }
+ stCase855:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st851
+ }
+ default:
+ goto st851
+ }
+ goto st78
+ tr933:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st856
+ tr940:
+
+ output.content = string(m.text())
+
+ goto st856
+ st856:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof856
+ }
+ stCase856:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st851
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr210:
+
+ output.tag = string(m.text())
+
+ goto st857
+ st857:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof857
+ }
+ stCase857:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st859
+ case 93:
+ goto tr938
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr936
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr936:
+
+ m.pb = m.p
+
+ goto st858
+ st858:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof858
+ }
+ stCase858:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st855
+ case 93:
+ goto tr940
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st854
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st859:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof859
+ }
+ stCase859:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st855
+ }
+ default:
+ goto st855
+ }
+ goto st78
+ tr938:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st860
+ tr945:
+
+ output.content = string(m.text())
+
+ goto st860
+ st860:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof860
+ }
+ stCase860:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st855
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr208:
+
+ output.tag = string(m.text())
+
+ goto st861
+ st861:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof861
+ }
+ stCase861:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st863
+ case 93:
+ goto tr943
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr941
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr941:
+
+ m.pb = m.p
+
+ goto st862
+ st862:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof862
+ }
+ stCase862:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st859
+ case 93:
+ goto tr945
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st858
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st863:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof863
+ }
+ stCase863:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st859
+ }
+ default:
+ goto st859
+ }
+ goto st78
+ tr943:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st864
+ tr950:
+
+ output.content = string(m.text())
+
+ goto st864
+ st864:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof864
+ }
+ stCase864:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st859
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr206:
+
+ output.tag = string(m.text())
+
+ goto st865
+ st865:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof865
+ }
+ stCase865:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st867
+ case 93:
+ goto tr948
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr946
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr946:
+
+ m.pb = m.p
+
+ goto st866
+ st866:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof866
+ }
+ stCase866:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st863
+ case 93:
+ goto tr950
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st862
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st867:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof867
+ }
+ stCase867:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st863
+ }
+ default:
+ goto st863
+ }
+ goto st78
+ tr948:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st868
+ tr955:
+
+ output.content = string(m.text())
+
+ goto st868
+ st868:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof868
+ }
+ stCase868:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st863
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr204:
+
+ output.tag = string(m.text())
+
+ goto st869
+ st869:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof869
+ }
+ stCase869:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st871
+ case 93:
+ goto tr953
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr951
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr951:
+
+ m.pb = m.p
+
+ goto st870
+ st870:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof870
+ }
+ stCase870:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st867
+ case 93:
+ goto tr955
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st866
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st871:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof871
+ }
+ stCase871:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st867
+ }
+ default:
+ goto st867
+ }
+ goto st78
+ tr953:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st872
+ tr960:
+
+ output.content = string(m.text())
+
+ goto st872
+ st872:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof872
+ }
+ stCase872:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st867
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr202:
+
+ output.tag = string(m.text())
+
+ goto st873
+ st873:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof873
+ }
+ stCase873:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st875
+ case 93:
+ goto tr958
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr956
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr956:
+
+ m.pb = m.p
+
+ goto st874
+ st874:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof874
+ }
+ stCase874:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st871
+ case 93:
+ goto tr960
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st870
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st875:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof875
+ }
+ stCase875:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st871
+ }
+ default:
+ goto st871
+ }
+ goto st78
+ tr958:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st876
+ tr965:
+
+ output.content = string(m.text())
+
+ goto st876
+ st876:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof876
+ }
+ stCase876:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st871
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr200:
+
+ output.tag = string(m.text())
+
+ goto st877
+ st877:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof877
+ }
+ stCase877:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st879
+ case 93:
+ goto tr963
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr961
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr961:
+
+ m.pb = m.p
+
+ goto st878
+ st878:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof878
+ }
+ stCase878:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st875
+ case 93:
+ goto tr965
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st874
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st879:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof879
+ }
+ stCase879:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st875
+ }
+ default:
+ goto st875
+ }
+ goto st78
+ tr963:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st880
+ tr970:
+
+ output.content = string(m.text())
+
+ goto st880
+ st880:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof880
+ }
+ stCase880:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st875
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr198:
+
+ output.tag = string(m.text())
+
+ goto st881
+ st881:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof881
+ }
+ stCase881:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st883
+ case 93:
+ goto tr968
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr966
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr966:
+
+ m.pb = m.p
+
+ goto st882
+ st882:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof882
+ }
+ stCase882:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st879
+ case 93:
+ goto tr970
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st878
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st883:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof883
+ }
+ stCase883:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st879
+ }
+ default:
+ goto st879
+ }
+ goto st78
+ tr968:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st884
+ tr975:
+
+ output.content = string(m.text())
+
+ goto st884
+ st884:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof884
+ }
+ stCase884:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st879
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr196:
+
+ output.tag = string(m.text())
+
+ goto st885
+ st885:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof885
+ }
+ stCase885:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st887
+ case 93:
+ goto tr973
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr971
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr971:
+
+ m.pb = m.p
+
+ goto st886
+ st886:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof886
+ }
+ stCase886:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st883
+ case 93:
+ goto tr975
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st882
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st887:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof887
+ }
+ stCase887:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st883
+ }
+ default:
+ goto st883
+ }
+ goto st78
+ tr973:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st888
+ tr980:
+
+ output.content = string(m.text())
+
+ goto st888
+ st888:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof888
+ }
+ stCase888:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st883
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr194:
+
+ output.tag = string(m.text())
+
+ goto st889
+ st889:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof889
+ }
+ stCase889:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st891
+ case 93:
+ goto tr978
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr976
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr976:
+
+ m.pb = m.p
+
+ goto st890
+ st890:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof890
+ }
+ stCase890:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st887
+ case 93:
+ goto tr980
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st886
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st891:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof891
+ }
+ stCase891:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st887
+ }
+ default:
+ goto st887
+ }
+ goto st78
+ tr978:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st892
+ tr985:
+
+ output.content = string(m.text())
+
+ goto st892
+ st892:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof892
+ }
+ stCase892:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st887
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr192:
+
+ output.tag = string(m.text())
+
+ goto st893
+ st893:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof893
+ }
+ stCase893:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st895
+ case 93:
+ goto tr983
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr981
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr981:
+
+ m.pb = m.p
+
+ goto st894
+ st894:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof894
+ }
+ stCase894:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st891
+ case 93:
+ goto tr985
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st890
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st895:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof895
+ }
+ stCase895:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st891
+ }
+ default:
+ goto st891
+ }
+ goto st78
+ tr983:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st896
+ tr990:
+
+ output.content = string(m.text())
+
+ goto st896
+ st896:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof896
+ }
+ stCase896:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st891
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr190:
+
+ output.tag = string(m.text())
+
+ goto st897
+ st897:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof897
+ }
+ stCase897:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st899
+ case 93:
+ goto tr988
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr986
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr986:
+
+ m.pb = m.p
+
+ goto st898
+ st898:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof898
+ }
+ stCase898:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st895
+ case 93:
+ goto tr990
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st894
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st899:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof899
+ }
+ stCase899:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st895
+ }
+ default:
+ goto st895
+ }
+ goto st78
+ tr988:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st900
+ tr995:
+
+ output.content = string(m.text())
+
+ goto st900
+ st900:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof900
+ }
+ stCase900:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st895
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr188:
+
+ output.tag = string(m.text())
+
+ goto st901
+ st901:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof901
+ }
+ stCase901:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st903
+ case 93:
+ goto tr993
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr991
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr991:
+
+ m.pb = m.p
+
+ goto st902
+ st902:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof902
+ }
+ stCase902:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st899
+ case 93:
+ goto tr995
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st898
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st903:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof903
+ }
+ stCase903:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st899
+ }
+ default:
+ goto st899
+ }
+ goto st78
+ tr993:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st904
+ tr1000:
+
+ output.content = string(m.text())
+
+ goto st904
+ st904:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof904
+ }
+ stCase904:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st899
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr186:
+
+ output.tag = string(m.text())
+
+ goto st905
+ st905:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof905
+ }
+ stCase905:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st907
+ case 93:
+ goto tr998
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr996
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr996:
+
+ m.pb = m.p
+
+ goto st906
+ st906:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof906
+ }
+ stCase906:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st903
+ case 93:
+ goto tr1000
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st902
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st907:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof907
+ }
+ stCase907:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st903
+ }
+ default:
+ goto st903
+ }
+ goto st78
+ tr998:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st908
+ tr1005:
+
+ output.content = string(m.text())
+
+ goto st908
+ st908:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof908
+ }
+ stCase908:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st903
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr184:
+
+ output.tag = string(m.text())
+
+ goto st909
+ st909:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof909
+ }
+ stCase909:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st911
+ case 93:
+ goto tr1003
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1001
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1001:
+
+ m.pb = m.p
+
+ goto st910
+ st910:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof910
+ }
+ stCase910:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st907
+ case 93:
+ goto tr1005
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st906
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st911:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof911
+ }
+ stCase911:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st907
+ }
+ default:
+ goto st907
+ }
+ goto st78
+ tr1003:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st912
+ tr1010:
+
+ output.content = string(m.text())
+
+ goto st912
+ st912:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof912
+ }
+ stCase912:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st907
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr182:
+
+ output.tag = string(m.text())
+
+ goto st913
+ st913:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof913
+ }
+ stCase913:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st915
+ case 93:
+ goto tr1008
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1006
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1006:
+
+ m.pb = m.p
+
+ goto st914
+ st914:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof914
+ }
+ stCase914:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st911
+ case 93:
+ goto tr1010
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st910
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st915:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof915
+ }
+ stCase915:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st911
+ }
+ default:
+ goto st911
+ }
+ goto st78
+ tr1008:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st916
+ tr1015:
+
+ output.content = string(m.text())
+
+ goto st916
+ st916:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof916
+ }
+ stCase916:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st911
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr180:
+
+ output.tag = string(m.text())
+
+ goto st917
+ st917:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof917
+ }
+ stCase917:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st919
+ case 93:
+ goto tr1013
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1011
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1011:
+
+ m.pb = m.p
+
+ goto st918
+ st918:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof918
+ }
+ stCase918:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st915
+ case 93:
+ goto tr1015
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st914
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st919:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof919
+ }
+ stCase919:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st915
+ }
+ default:
+ goto st915
+ }
+ goto st78
+ tr1013:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st920
+ tr1020:
+
+ output.content = string(m.text())
+
+ goto st920
+ st920:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof920
+ }
+ stCase920:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st915
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr178:
+
+ output.tag = string(m.text())
+
+ goto st921
+ st921:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof921
+ }
+ stCase921:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st923
+ case 93:
+ goto tr1018
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1016
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1016:
+
+ m.pb = m.p
+
+ goto st922
+ st922:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof922
+ }
+ stCase922:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st919
+ case 93:
+ goto tr1020
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st918
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st923:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof923
+ }
+ stCase923:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st919
+ }
+ default:
+ goto st919
+ }
+ goto st78
+ tr1018:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st924
+ tr1025:
+
+ output.content = string(m.text())
+
+ goto st924
+ st924:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof924
+ }
+ stCase924:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st919
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr176:
+
+ output.tag = string(m.text())
+
+ goto st925
+ st925:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof925
+ }
+ stCase925:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st927
+ case 93:
+ goto tr1023
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1021
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1021:
+
+ m.pb = m.p
+
+ goto st926
+ st926:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof926
+ }
+ stCase926:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st923
+ case 93:
+ goto tr1025
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st922
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st927:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof927
+ }
+ stCase927:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st923
+ }
+ default:
+ goto st923
+ }
+ goto st78
+ tr1023:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st928
+ tr1030:
+
+ output.content = string(m.text())
+
+ goto st928
+ st928:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof928
+ }
+ stCase928:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st923
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr174:
+
+ output.tag = string(m.text())
+
+ goto st929
+ st929:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof929
+ }
+ stCase929:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st931
+ case 93:
+ goto tr1028
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1026
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1026:
+
+ m.pb = m.p
+
+ goto st930
+ st930:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof930
+ }
+ stCase930:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st927
+ case 93:
+ goto tr1030
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st926
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st931:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof931
+ }
+ stCase931:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st927
+ }
+ default:
+ goto st927
+ }
+ goto st78
+ tr1028:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st932
+ tr1035:
+
+ output.content = string(m.text())
+
+ goto st932
+ st932:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof932
+ }
+ stCase932:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st927
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr172:
+
+ output.tag = string(m.text())
+
+ goto st933
+ st933:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof933
+ }
+ stCase933:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st935
+ case 93:
+ goto tr1033
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1031
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1031:
+
+ m.pb = m.p
+
+ goto st934
+ st934:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof934
+ }
+ stCase934:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st931
+ case 93:
+ goto tr1035
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st930
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st935:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof935
+ }
+ stCase935:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st931
+ }
+ default:
+ goto st931
+ }
+ goto st78
+ tr1033:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st936
+ tr1040:
+
+ output.content = string(m.text())
+
+ goto st936
+ st936:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof936
+ }
+ stCase936:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st931
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr170:
+
+ output.tag = string(m.text())
+
+ goto st937
+ st937:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof937
+ }
+ stCase937:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st939
+ case 93:
+ goto tr1038
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1036
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1036:
+
+ m.pb = m.p
+
+ goto st938
+ st938:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof938
+ }
+ stCase938:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st935
+ case 93:
+ goto tr1040
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st934
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st939:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof939
+ }
+ stCase939:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st935
+ }
+ default:
+ goto st935
+ }
+ goto st78
+ tr1038:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st940
+ tr1045:
+
+ output.content = string(m.text())
+
+ goto st940
+ st940:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof940
+ }
+ stCase940:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st935
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr168:
+
+ output.tag = string(m.text())
+
+ goto st941
+ st941:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof941
+ }
+ stCase941:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st943
+ case 93:
+ goto tr1043
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1041
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1041:
+
+ m.pb = m.p
+
+ goto st942
+ st942:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof942
+ }
+ stCase942:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st939
+ case 93:
+ goto tr1045
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st938
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st943:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof943
+ }
+ stCase943:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st939
+ }
+ default:
+ goto st939
+ }
+ goto st78
+ tr1043:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st944
+ tr1050:
+
+ output.content = string(m.text())
+
+ goto st944
+ st944:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof944
+ }
+ stCase944:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st939
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr166:
+
+ output.tag = string(m.text())
+
+ goto st945
+ st945:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof945
+ }
+ stCase945:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st947
+ case 93:
+ goto tr1048
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1046
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1046:
+
+ m.pb = m.p
+
+ goto st946
+ st946:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof946
+ }
+ stCase946:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st943
+ case 93:
+ goto tr1050
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st942
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st947:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof947
+ }
+ stCase947:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st943
+ }
+ default:
+ goto st943
+ }
+ goto st78
+ tr1048:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st948
+ tr1055:
+
+ output.content = string(m.text())
+
+ goto st948
+ st948:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof948
+ }
+ stCase948:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st943
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr164:
+
+ output.tag = string(m.text())
+
+ goto st949
+ st949:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof949
+ }
+ stCase949:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st951
+ case 93:
+ goto tr1053
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1051
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1051:
+
+ m.pb = m.p
+
+ goto st950
+ st950:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof950
+ }
+ stCase950:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st947
+ case 93:
+ goto tr1055
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st946
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st951:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof951
+ }
+ stCase951:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st947
+ }
+ default:
+ goto st947
+ }
+ goto st78
+ tr1053:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st952
+ tr1060:
+
+ output.content = string(m.text())
+
+ goto st952
+ st952:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof952
+ }
+ stCase952:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st947
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr162:
+
+ output.tag = string(m.text())
+
+ goto st953
+ st953:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof953
+ }
+ stCase953:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st955
+ case 93:
+ goto tr1058
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1056
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1056:
+
+ m.pb = m.p
+
+ goto st954
+ st954:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof954
+ }
+ stCase954:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st951
+ case 93:
+ goto tr1060
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st950
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st955:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof955
+ }
+ stCase955:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st951
+ }
+ default:
+ goto st951
+ }
+ goto st78
+ tr1058:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st956
+ tr1065:
+
+ output.content = string(m.text())
+
+ goto st956
+ st956:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof956
+ }
+ stCase956:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st951
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr160:
+
+ output.tag = string(m.text())
+
+ goto st957
+ st957:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof957
+ }
+ stCase957:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st959
+ case 93:
+ goto tr1063
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1061
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1061:
+
+ m.pb = m.p
+
+ goto st958
+ st958:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof958
+ }
+ stCase958:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st955
+ case 93:
+ goto tr1065
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st954
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st959:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof959
+ }
+ stCase959:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st955
+ }
+ default:
+ goto st955
+ }
+ goto st78
+ tr1063:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st960
+ tr1070:
+
+ output.content = string(m.text())
+
+ goto st960
+ st960:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof960
+ }
+ stCase960:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st955
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr158:
+
+ output.tag = string(m.text())
+
+ goto st961
+ st961:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof961
+ }
+ stCase961:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st963
+ case 93:
+ goto tr1068
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1066
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1066:
+
+ m.pb = m.p
+
+ goto st962
+ st962:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof962
+ }
+ stCase962:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st959
+ case 93:
+ goto tr1070
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st958
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st963:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof963
+ }
+ stCase963:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st959
+ }
+ default:
+ goto st959
+ }
+ goto st78
+ tr1068:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st964
+ tr1075:
+
+ output.content = string(m.text())
+
+ goto st964
+ st964:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof964
+ }
+ stCase964:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st959
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr156:
+
+ output.tag = string(m.text())
+
+ goto st965
+ st965:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof965
+ }
+ stCase965:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st967
+ case 93:
+ goto tr1073
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1071
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1071:
+
+ m.pb = m.p
+
+ goto st966
+ st966:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof966
+ }
+ stCase966:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st963
+ case 93:
+ goto tr1075
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st962
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st967:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof967
+ }
+ stCase967:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st963
+ }
+ default:
+ goto st963
+ }
+ goto st78
+ tr1073:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st968
+ tr1080:
+
+ output.content = string(m.text())
+
+ goto st968
+ st968:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof968
+ }
+ stCase968:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st963
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr154:
+
+ output.tag = string(m.text())
+
+ goto st969
+ st969:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof969
+ }
+ stCase969:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st971
+ case 93:
+ goto tr1078
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1076
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1076:
+
+ m.pb = m.p
+
+ goto st970
+ st970:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof970
+ }
+ stCase970:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st967
+ case 93:
+ goto tr1080
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st966
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st971:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof971
+ }
+ stCase971:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st967
+ }
+ default:
+ goto st967
+ }
+ goto st78
+ tr1078:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st972
+ tr1085:
+
+ output.content = string(m.text())
+
+ goto st972
+ st972:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof972
+ }
+ stCase972:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st967
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr152:
+
+ output.tag = string(m.text())
+
+ goto st973
+ st973:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof973
+ }
+ stCase973:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st975
+ case 93:
+ goto tr1083
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1081
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1081:
+
+ m.pb = m.p
+
+ goto st974
+ st974:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof974
+ }
+ stCase974:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st971
+ case 93:
+ goto tr1085
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st970
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st975:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof975
+ }
+ stCase975:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st971
+ }
+ default:
+ goto st971
+ }
+ goto st78
+ tr1083:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st976
+ tr1090:
+
+ output.content = string(m.text())
+
+ goto st976
+ st976:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof976
+ }
+ stCase976:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st971
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr150:
+
+ output.tag = string(m.text())
+
+ goto st977
+ st977:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof977
+ }
+ stCase977:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st979
+ case 93:
+ goto tr1088
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1086
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1086:
+
+ m.pb = m.p
+
+ goto st978
+ st978:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof978
+ }
+ stCase978:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st975
+ case 93:
+ goto tr1090
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st974
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st979:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof979
+ }
+ stCase979:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st975
+ }
+ default:
+ goto st975
+ }
+ goto st78
+ tr1088:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st980
+ tr1095:
+
+ output.content = string(m.text())
+
+ goto st980
+ st980:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof980
+ }
+ stCase980:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st975
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr89:
+
+ output.tag = string(m.text())
+
+ goto st981
+ st981:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof981
+ }
+ stCase981:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto tr144
+ case 91:
+ goto st983
+ case 93:
+ goto tr1093
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto tr1091
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ tr1091:
+
+ m.pb = m.p
+
+ goto st982
+ st982:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof982
+ }
+ stCase982:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st129
+ case 91:
+ goto st979
+ case 93:
+ goto tr1095
+ case 127:
+ goto tr449
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st978
+ }
+ default:
+ goto tr449
+ }
+ goto st78
+ st983:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof983
+ }
+ stCase983:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st979
+ }
+ default:
+ goto st979
+ }
+ goto st78
+ tr1093:
+
+ m.pb = m.p
+
+ output.content = string(m.text())
+
+ goto st984
+ st984:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof984
+ }
+ stCase984:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 58:
+ goto st126
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] > 31:
+ if 33 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st979
+ }
+ default:
+ goto tr85
+ }
+ goto st78
+ tr41:
+
+ m.pb = m.p
+
+ goto st985
+ st985:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof985
+ }
+ stCase985:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st986
+ }
+ default:
+ goto st986
+ }
+ goto st78
+ st986:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof986
+ }
+ stCase986:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr84
+ case 32:
+ goto tr86
+ case 127:
+ goto tr85
+ }
+ switch {
+ case (m.data)[(m.p)] < 33:
+ if (m.data)[(m.p)] <= 31 {
+ goto tr85
+ }
+ case (m.data)[(m.p)] > 57:
+ if 59 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 126 {
+ goto st983
+ }
+ default:
+ goto st983
+ }
+ goto st78
+ st21:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof21
+ }
+ stCase21:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 51 {
+ goto st13
+ }
+ goto tr7
+ st22:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof22
+ }
+ stCase22:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto st10
+ }
+ goto tr7
+ st23:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof23
+ }
+ stCase23:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
+ goto st10
+ }
+ goto tr7
+ st24:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof24
+ }
+ stCase24:
+ if (m.data)[(m.p)] == 103 {
+ goto st7
+ }
+ goto tr7
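+ // tr9–tr15 / st25–st39 match the trailing letters of the month abbreviations (Dec, Feb, Jan/Jun/Jul, Mar/May, Nov, Oct, Sep)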
+ tr9:
+
+ m.pb = m.p
+
+ goto st25
+ st25:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof25
+ }
+ stCase25:
+ if (m.data)[(m.p)] == 101 {
+ goto st26
+ }
+ goto tr7
+ st26:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof26
+ }
+ stCase26:
+ if (m.data)[(m.p)] == 99 {
+ goto st7
+ }
+ goto tr7
+ tr10:
+
+ m.pb = m.p
+
+ goto st27
+ st27:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof27
+ }
+ stCase27:
+ if (m.data)[(m.p)] == 101 {
+ goto st28
+ }
+ goto tr7
+ st28:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof28
+ }
+ stCase28:
+ if (m.data)[(m.p)] == 98 {
+ goto st7
+ }
+ goto tr7
+ tr11:
+
+ m.pb = m.p
+
+ goto st29
+ st29:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof29
+ }
+ stCase29:
+ switch (m.data)[(m.p)] {
+ case 97:
+ goto st30
+ case 117:
+ goto st31
+ }
+ goto tr7
+ st30:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof30
+ }
+ stCase30:
+ if (m.data)[(m.p)] == 110 {
+ goto st7
+ }
+ goto tr7
+ st31:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof31
+ }
+ stCase31:
+ switch (m.data)[(m.p)] {
+ case 108:
+ goto st7
+ case 110:
+ goto st7
+ }
+ goto tr7
+ tr12:
+
+ m.pb = m.p
+
+ goto st32
+ st32:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof32
+ }
+ stCase32:
+ if (m.data)[(m.p)] == 97 {
+ goto st33
+ }
+ goto tr7
+ st33:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof33
+ }
+ stCase33:
+ switch (m.data)[(m.p)] {
+ case 114:
+ goto st7
+ case 121:
+ goto st7
+ }
+ goto tr7
+ tr13:
+
+ m.pb = m.p
+
+ goto st34
+ st34:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof34
+ }
+ stCase34:
+ if (m.data)[(m.p)] == 111 {
+ goto st35
+ }
+ goto tr7
+ st35:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof35
+ }
+ stCase35:
+ if (m.data)[(m.p)] == 118 {
+ goto st7
+ }
+ goto tr7
+ tr14:
+
+ m.pb = m.p
+
+ goto st36
+ st36:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof36
+ }
+ stCase36:
+ if (m.data)[(m.p)] == 99 {
+ goto st37
+ }
+ goto tr7
+ st37:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof37
+ }
+ stCase37:
+ if (m.data)[(m.p)] == 116 {
+ goto st7
+ }
+ goto tr7
+ tr15:
+
+ m.pb = m.p
+
+ goto st38
+ st38:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof38
+ }
+ stCase38:
+ if (m.data)[(m.p)] == 101 {
+ goto st39
+ }
+ goto tr7
+ st39:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof39
+ }
+ stCase39:
+ if (m.data)[(m.p)] == 112 {
+ goto st7
+ }
+ goto tr7
+ tr16:
+
+ m.pb = m.p
+
+ goto st40
+ st40:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof40
+ }
+ stCase40:
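+ // _widec widens digit bytes into the 560–569 range only when m.rfc3339 is set, gating the RFC3339 timestamp branch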
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto st41
+ }
+ goto st0
+ st41:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof41
+ }
+ stCase41:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto st42
+ }
+ goto st0
+ st42:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof42
+ }
+ stCase42:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto st43
+ }
+ goto st0
+ st43:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof43
+ }
+ stCase43:
+ _widec = int16((m.data)[(m.p)])
+ if 45 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 45 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if _widec == 557 {
+ goto st44
+ }
+ goto st0
+ st44:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof44
+ }
+ stCase44:
+ _widec = int16((m.data)[(m.p)])
+ switch {
+ case (m.data)[(m.p)] > 48:
+ if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ case (m.data)[(m.p)] >= 48:
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ switch _widec {
+ case 560:
+ goto st45
+ case 561:
+ goto st69
+ }
+ goto st0
+ st45:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof45
+ }
+ stCase45:
+ _widec = int16((m.data)[(m.p)])
+ if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 561 <= _widec && _widec <= 569 {
+ goto st46
+ }
+ goto st0
+ st46:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof46
+ }
+ stCase46:
+ _widec = int16((m.data)[(m.p)])
+ if 45 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 45 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if _widec == 557 {
+ goto st47
+ }
+ goto st0
+ st47:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof47
+ }
+ stCase47:
+ _widec = int16((m.data)[(m.p)])
+ switch {
+ case (m.data)[(m.p)] < 49:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 48 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ case (m.data)[(m.p)] > 50:
+ if 51 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 51 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ default:
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ switch _widec {
+ case 560:
+ goto st48
+ case 563:
+ goto st68
+ }
+ if 561 <= _widec && _widec <= 562 {
+ goto st67
+ }
+ goto st0
+ st48:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof48
+ }
+ stCase48:
+ _widec = int16((m.data)[(m.p)])
+ if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 561 <= _widec && _widec <= 569 {
+ goto st49
+ }
+ goto st0
+ st49:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof49
+ }
+ stCase49:
+ _widec = int16((m.data)[(m.p)])
+ if 84 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 84 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if _widec == 596 {
+ goto st50
+ }
+ goto st0
+ st50:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof50
+ }
+ stCase50:
+ _widec = int16((m.data)[(m.p)])
+ switch {
+ case (m.data)[(m.p)] > 49:
+ if 50 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 50 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ case (m.data)[(m.p)] >= 48:
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if _widec == 562 {
+ goto st66
+ }
+ if 560 <= _widec && _widec <= 561 {
+ goto st51
+ }
+ goto st0
+ st51:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof51
+ }
+ stCase51:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto st52
+ }
+ goto st0
+ st52:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof52
+ }
+ stCase52:
+ _widec = int16((m.data)[(m.p)])
+ if 58 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 58 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if _widec == 570 {
+ goto st53
+ }
+ goto st0
+ st53:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof53
+ }
+ stCase53:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 565 {
+ goto st54
+ }
+ goto st0
+ st54:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof54
+ }
+ stCase54:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto st55
+ }
+ goto st0
+ st55:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof55
+ }
+ stCase55:
+ _widec = int16((m.data)[(m.p)])
+ if 58 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 58 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if _widec == 570 {
+ goto st56
+ }
+ goto st0
+ st56:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof56
+ }
+ stCase56:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 565 {
+ goto st57
+ }
+ goto st0
+ st57:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof57
+ }
+ stCase57:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto st58
+ }
+ goto st0
+ st58:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof58
+ }
+ stCase58:
+ _widec = int16((m.data)[(m.p)])
+ switch {
+ case (m.data)[(m.p)] < 45:
+ if 43 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 43 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ case (m.data)[(m.p)] > 45:
+ if 90 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 90 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ default:
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ switch _widec {
+ case 555:
+ goto st59
+ case 557:
+ goto st59
+ case 602:
+ goto st64
+ }
+ goto tr72
+ st59:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof59
+ }
+ stCase59:
+ _widec = int16((m.data)[(m.p)])
+ switch {
+ case (m.data)[(m.p)] > 49:
+ if 50 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 50 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ case (m.data)[(m.p)] >= 48:
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if _widec == 562 {
+ goto st65
+ }
+ if 560 <= _widec && _widec <= 561 {
+ goto st60
+ }
+ goto tr72
+ st60:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof60
+ }
+ stCase60:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto st61
+ }
+ goto tr72
+ st61:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof61
+ }
+ stCase61:
+ _widec = int16((m.data)[(m.p)])
+ if 58 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 58 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if _widec == 570 {
+ goto st62
+ }
+ goto tr72
+ st62:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof62
+ }
+ stCase62:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 53 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 565 {
+ goto st63
+ }
+ goto tr72
+ st63:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof63
+ }
+ stCase63:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto st64
+ }
+ goto tr72
+ st64:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof64
+ }
+ stCase64:
+ if (m.data)[(m.p)] == 32 {
+ goto tr80
+ }
+ goto st0
+ st65:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof65
+ }
+ stCase65:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 51 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 563 {
+ goto st61
+ }
+ goto tr72
+ st66:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof66
+ }
+ stCase66:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 51 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 563 {
+ goto st52
+ }
+ goto st0
+ st67:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof67
+ }
+ stCase67:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 569 {
+ goto st49
+ }
+ goto st0
+ st68:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof68
+ }
+ stCase68:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 561 {
+ goto st49
+ }
+ goto st0
+ st69:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof69
+ }
+ stCase69:
+ _widec = int16((m.data)[(m.p)])
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 50 {
+ _widec = 256 + (int16((m.data)[(m.p)]) - 0)
+ if m.rfc3339 {
+ _widec += 256
+ }
+ }
+ if 560 <= _widec && _widec <= 562 {
+ goto st46
+ }
+ goto st0
+ tr4:
+
+ m.pb = m.p
+
+ goto st70
+ st70:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof70
+ }
+ stCase70:
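+ // record the priority (<PRI>) digits parsed so far and mark the priority as set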
+
+ output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
+ output.prioritySet = true
+ switch (m.data)[(m.p)] {
+ case 57:
+ goto st72
+ case 62:
+ goto st4
+ }
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 56 {
+ goto st71
+ }
+ goto tr2
+ tr5:
+
+ m.pb = m.p
+
+ goto st71
+ st71:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof71
+ }
+ stCase71:
+
+ output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
+ output.prioritySet = true
+ if (m.data)[(m.p)] == 62 {
+ goto st4
+ }
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto st3
+ }
+ goto tr2
+ st72:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof72
+ }
+ stCase72:
+
+ output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
+ output.prioritySet = true
+ if (m.data)[(m.p)] == 62 {
+ goto st4
+ }
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 49 {
+ goto st3
+ }
+ goto tr2
+ st987:
+ if (m.p)++; (m.p) == (m.pe) {
+ goto _testEof987
+ }
+ stCase987:
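+ // catch-all state: LF (10) and CR (13) drop to st0; every other byte stays in this state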
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto st0
+ case 13:
+ goto st0
+ }
+ goto st987
+ stOut:
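+ // end-of-input labels: each records the current state in m.cs, then jumps to the shared _testEof handler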
+ _testEof2:
+ m.cs = 2
+ goto _testEof
+ _testEof3:
+ m.cs = 3
+ goto _testEof
+ _testEof4:
+ m.cs = 4
+ goto _testEof
+ _testEof5:
+ m.cs = 5
+ goto _testEof
+ _testEof6:
+ m.cs = 6
+ goto _testEof
+ _testEof7:
+ m.cs = 7
+ goto _testEof
+ _testEof8:
+ m.cs = 8
+ goto _testEof
+ _testEof9:
+ m.cs = 9
+ goto _testEof
+ _testEof10:
+ m.cs = 10
+ goto _testEof
+ _testEof11:
+ m.cs = 11
+ goto _testEof
+ _testEof12:
+ m.cs = 12
+ goto _testEof
+ _testEof13:
+ m.cs = 13
+ goto _testEof
+ _testEof14:
+ m.cs = 14
+ goto _testEof
+ _testEof15:
+ m.cs = 15
+ goto _testEof
+ _testEof16:
+ m.cs = 16
+ goto _testEof
+ _testEof17:
+ m.cs = 17
+ goto _testEof
+ _testEof18:
+ m.cs = 18
+ goto _testEof
+ _testEof19:
+ m.cs = 19
+ goto _testEof
+ _testEof20:
+ m.cs = 20
+ goto _testEof
+ _testEof73:
+ m.cs = 73
+ goto _testEof
+ _testEof74:
+ m.cs = 74
+ goto _testEof
+ _testEof75:
+ m.cs = 75
+ goto _testEof
+ _testEof76:
+ m.cs = 76
+ goto _testEof
+ _testEof77:
+ m.cs = 77
+ goto _testEof
+ _testEof78:
+ m.cs = 78
+ goto _testEof
+ _testEof79:
+ m.cs = 79
+ goto _testEof
+ _testEof80:
+ m.cs = 80
+ goto _testEof
+ _testEof81:
+ m.cs = 81
+ goto _testEof
+ _testEof82:
+ m.cs = 82
+ goto _testEof
+ _testEof83:
+ m.cs = 83
+ goto _testEof
+ _testEof84:
+ m.cs = 84
+ goto _testEof
+ _testEof85:
+ m.cs = 85
+ goto _testEof
+ _testEof86:
+ m.cs = 86
+ goto _testEof
+ _testEof87:
+ m.cs = 87
+ goto _testEof
+ _testEof88:
+ m.cs = 88
+ goto _testEof
+ _testEof89:
+ m.cs = 89
+ goto _testEof
+ _testEof90:
+ m.cs = 90
+ goto _testEof
+ _testEof91:
+ m.cs = 91
+ goto _testEof
+ _testEof92:
+ m.cs = 92
+ goto _testEof
+ _testEof93:
+ m.cs = 93
+ goto _testEof
+ _testEof94:
+ m.cs = 94
+ goto _testEof
+ _testEof95:
+ m.cs = 95
+ goto _testEof
+ _testEof96:
+ m.cs = 96
+ goto _testEof
+ _testEof97:
+ m.cs = 97
+ goto _testEof
+ _testEof98:
+ m.cs = 98
+ goto _testEof
+ _testEof99:
+ m.cs = 99
+ goto _testEof
+ _testEof100:
+ m.cs = 100
+ goto _testEof
+ _testEof101:
+ m.cs = 101
+ goto _testEof
+ _testEof102:
+ m.cs = 102
+ goto _testEof
+ _testEof103:
+ m.cs = 103
+ goto _testEof
+ _testEof104:
+ m.cs = 104
+ goto _testEof
+ _testEof105:
+ m.cs = 105
+ goto _testEof
+ _testEof106:
+ m.cs = 106
+ goto _testEof
+ _testEof107:
+ m.cs = 107
+ goto _testEof
+ _testEof108:
+ m.cs = 108
+ goto _testEof
+ _testEof109:
+ m.cs = 109
+ goto _testEof
+ _testEof110:
+ m.cs = 110
+ goto _testEof
+ _testEof111:
+ m.cs = 111
+ goto _testEof
+ _testEof112:
+ m.cs = 112
+ goto _testEof
+ _testEof113:
+ m.cs = 113
+ goto _testEof
+ _testEof114:
+ m.cs = 114
+ goto _testEof
+ _testEof115:
+ m.cs = 115
+ goto _testEof
+ _testEof116:
+ m.cs = 116
+ goto _testEof
+ _testEof117:
+ m.cs = 117
+ goto _testEof
+ _testEof118:
+ m.cs = 118
+ goto _testEof
+ _testEof119:
+ m.cs = 119
+ goto _testEof
+ _testEof120:
+ m.cs = 120
+ goto _testEof
+ _testEof121:
+ m.cs = 121
+ goto _testEof
+ _testEof122:
+ m.cs = 122
+ goto _testEof
+ _testEof123:
+ m.cs = 123
+ goto _testEof
+ _testEof124:
+ m.cs = 124
+ goto _testEof
+ _testEof125:
+ m.cs = 125
+ goto _testEof
+ _testEof126:
+ m.cs = 126
+ goto _testEof
+ _testEof127:
+ m.cs = 127
+ goto _testEof
+ _testEof128:
+ m.cs = 128
+ goto _testEof
+ _testEof129:
+ m.cs = 129
+ goto _testEof
+ _testEof130:
+ m.cs = 130
+ goto _testEof
+ _testEof131:
+ m.cs = 131
+ goto _testEof
+ _testEof132:
+ m.cs = 132
+ goto _testEof
+ _testEof133:
+ m.cs = 133
+ goto _testEof
+ _testEof134:
+ m.cs = 134
+ goto _testEof
+ _testEof135:
+ m.cs = 135
+ goto _testEof
+ _testEof136:
+ m.cs = 136
+ goto _testEof
+ _testEof137:
+ m.cs = 137
+ goto _testEof
+ _testEof138:
+ m.cs = 138
+ goto _testEof
+ _testEof139:
+ m.cs = 139
+ goto _testEof
+ _testEof140:
+ m.cs = 140
+ goto _testEof
+ _testEof141:
+ m.cs = 141
+ goto _testEof
+ _testEof142:
+ m.cs = 142
+ goto _testEof
+ _testEof143:
+ m.cs = 143
+ goto _testEof
+ _testEof144:
+ m.cs = 144
+ goto _testEof
+ _testEof145:
+ m.cs = 145
+ goto _testEof
+ _testEof146:
+ m.cs = 146
+ goto _testEof
+ _testEof147:
+ m.cs = 147
+ goto _testEof
+ _testEof148:
+ m.cs = 148
+ goto _testEof
+ _testEof149:
+ m.cs = 149
+ goto _testEof
+ _testEof150:
+ m.cs = 150
+ goto _testEof
+ _testEof151:
+ m.cs = 151
+ goto _testEof
+ _testEof152:
+ m.cs = 152
+ goto _testEof
+ _testEof153:
+ m.cs = 153
+ goto _testEof
+ _testEof154:
+ m.cs = 154
+ goto _testEof
+ _testEof155:
+ m.cs = 155
+ goto _testEof
+ _testEof156:
+ m.cs = 156
+ goto _testEof
+ _testEof157:
+ m.cs = 157
+ goto _testEof
+ _testEof158:
+ m.cs = 158
+ goto _testEof
+ _testEof159:
+ m.cs = 159
+ goto _testEof
+ _testEof160:
+ m.cs = 160
+ goto _testEof
+ _testEof161:
+ m.cs = 161
+ goto _testEof
+ _testEof162:
+ m.cs = 162
+ goto _testEof
+ _testEof163:
+ m.cs = 163
+ goto _testEof
+ _testEof164:
+ m.cs = 164
+ goto _testEof
+ _testEof165:
+ m.cs = 165
+ goto _testEof
+ _testEof166:
+ m.cs = 166
+ goto _testEof
+ _testEof167:
+ m.cs = 167
+ goto _testEof
+ _testEof168:
+ m.cs = 168
+ goto _testEof
+ _testEof169:
+ m.cs = 169
+ goto _testEof
+ _testEof170:
+ m.cs = 170
+ goto _testEof
+ _testEof171:
+ m.cs = 171
+ goto _testEof
+ _testEof172:
+ m.cs = 172
+ goto _testEof
+ _testEof173:
+ m.cs = 173
+ goto _testEof
+ _testEof174:
+ m.cs = 174
+ goto _testEof
+ _testEof175:
+ m.cs = 175
+ goto _testEof
+ _testEof176:
+ m.cs = 176
+ goto _testEof
+ _testEof177:
+ m.cs = 177
+ goto _testEof
+ _testEof178:
+ m.cs = 178
+ goto _testEof
+ _testEof179:
+ m.cs = 179
+ goto _testEof
+ _testEof180:
+ m.cs = 180
+ goto _testEof
+ _testEof181:
+ m.cs = 181
+ goto _testEof
+ _testEof182:
+ m.cs = 182
+ goto _testEof
+ _testEof183:
+ m.cs = 183
+ goto _testEof
+ _testEof184:
+ m.cs = 184
+ goto _testEof
+ _testEof185:
+ m.cs = 185
+ goto _testEof
+ _testEof186:
+ m.cs = 186
+ goto _testEof
+ _testEof187:
+ m.cs = 187
+ goto _testEof
+ _testEof188:
+ m.cs = 188
+ goto _testEof
+ _testEof189:
+ m.cs = 189
+ goto _testEof
+ _testEof190:
+ m.cs = 190
+ goto _testEof
+ _testEof191:
+ m.cs = 191
+ goto _testEof
+ _testEof192:
+ m.cs = 192
+ goto _testEof
+ _testEof193:
+ m.cs = 193
+ goto _testEof
+ _testEof194:
+ m.cs = 194
+ goto _testEof
+ _testEof195:
+ m.cs = 195
+ goto _testEof
+ _testEof196:
+ m.cs = 196
+ goto _testEof
+ _testEof197:
+ m.cs = 197
+ goto _testEof
+ _testEof198:
+ m.cs = 198
+ goto _testEof
+ _testEof199:
+ m.cs = 199
+ goto _testEof
+ _testEof200:
+ m.cs = 200
+ goto _testEof
+ _testEof201:
+ m.cs = 201
+ goto _testEof
+ _testEof202:
+ m.cs = 202
+ goto _testEof
+ _testEof203:
+ m.cs = 203
+ goto _testEof
+ _testEof204:
+ m.cs = 204
+ goto _testEof
+ _testEof205:
+ m.cs = 205
+ goto _testEof
+ _testEof206:
+ m.cs = 206
+ goto _testEof
+ _testEof207:
+ m.cs = 207
+ goto _testEof
+ _testEof208:
+ m.cs = 208
+ goto _testEof
+ _testEof209:
+ m.cs = 209
+ goto _testEof
+ _testEof210:
+ m.cs = 210
+ goto _testEof
+ _testEof211:
+ m.cs = 211
+ goto _testEof
+ _testEof212:
+ m.cs = 212
+ goto _testEof
+ _testEof213:
+ m.cs = 213
+ goto _testEof
+ _testEof214:
+ m.cs = 214
+ goto _testEof
+ _testEof215:
+ m.cs = 215
+ goto _testEof
+ _testEof216:
+ m.cs = 216
+ goto _testEof
+ _testEof217:
+ m.cs = 217
+ goto _testEof
+ _testEof218:
+ m.cs = 218
+ goto _testEof
+ _testEof219:
+ m.cs = 219
+ goto _testEof
+ _testEof220:
+ m.cs = 220
+ goto _testEof
+ _testEof221:
+ m.cs = 221
+ goto _testEof
+ _testEof222:
+ m.cs = 222
+ goto _testEof
+ _testEof223:
+ m.cs = 223
+ goto _testEof
+ _testEof224:
+ m.cs = 224
+ goto _testEof
+ _testEof225:
+ m.cs = 225
+ goto _testEof
+ _testEof226:
+ m.cs = 226
+ goto _testEof
+ _testEof227:
+ m.cs = 227
+ goto _testEof
+ _testEof228:
+ m.cs = 228
+ goto _testEof
+ _testEof229:
+ m.cs = 229
+ goto _testEof
+ _testEof230:
+ m.cs = 230
+ goto _testEof
+ _testEof231:
+ m.cs = 231
+ goto _testEof
+ _testEof232:
+ m.cs = 232
+ goto _testEof
+ _testEof233:
+ m.cs = 233
+ goto _testEof
+ _testEof234:
+ m.cs = 234
+ goto _testEof
+ _testEof235:
+ m.cs = 235
+ goto _testEof
+ _testEof236:
+ m.cs = 236
+ goto _testEof
+ _testEof237:
+ m.cs = 237
+ goto _testEof
+ _testEof238:
+ m.cs = 238
+ goto _testEof
+ _testEof239:
+ m.cs = 239
+ goto _testEof
+ _testEof240:
+ m.cs = 240
+ goto _testEof
+ _testEof241:
+ m.cs = 241
+ goto _testEof
+ _testEof242:
+ m.cs = 242
+ goto _testEof
+ _testEof243:
+ m.cs = 243
+ goto _testEof
+ _testEof244:
+ m.cs = 244
+ goto _testEof
+ _testEof245:
+ m.cs = 245
+ goto _testEof
+ _testEof246:
+ m.cs = 246
+ goto _testEof
+ _testEof247:
+ m.cs = 247
+ goto _testEof
+ _testEof248:
+ m.cs = 248
+ goto _testEof
+ _testEof249:
+ m.cs = 249
+ goto _testEof
+ _testEof250:
+ m.cs = 250
+ goto _testEof
+ _testEof251:
+ m.cs = 251
+ goto _testEof
+ _testEof252:
+ m.cs = 252
+ goto _testEof
+ _testEof253:
+ m.cs = 253
+ goto _testEof
+ _testEof254:
+ m.cs = 254
+ goto _testEof
+ _testEof255:
+ m.cs = 255
+ goto _testEof
+ _testEof256:
+ m.cs = 256
+ goto _testEof
+ _testEof257:
+ m.cs = 257
+ goto _testEof
+ _testEof258:
+ m.cs = 258
+ goto _testEof
+ _testEof259:
+ m.cs = 259
+ goto _testEof
+ _testEof260:
+ m.cs = 260
+ goto _testEof
+ _testEof261:
+ m.cs = 261
+ goto _testEof
+ _testEof262:
+ m.cs = 262
+ goto _testEof
+ _testEof263:
+ m.cs = 263
+ goto _testEof
+ _testEof264:
+ m.cs = 264
+ goto _testEof
+ _testEof265:
+ m.cs = 265
+ goto _testEof
+ _testEof266:
+ m.cs = 266
+ goto _testEof
+ _testEof267:
+ m.cs = 267
+ goto _testEof
+ _testEof268:
+ m.cs = 268
+ goto _testEof
+ _testEof269:
+ m.cs = 269
+ goto _testEof
+ _testEof270:
+ m.cs = 270
+ goto _testEof
+ _testEof271:
+ m.cs = 271
+ goto _testEof
+ _testEof272:
+ m.cs = 272
+ goto _testEof
+ _testEof273:
+ m.cs = 273
+ goto _testEof
+ _testEof274:
+ m.cs = 274
+ goto _testEof
+ _testEof275:
+ m.cs = 275
+ goto _testEof
+ _testEof276:
+ m.cs = 276
+ goto _testEof
+ _testEof277:
+ m.cs = 277
+ goto _testEof
+ _testEof278:
+ m.cs = 278
+ goto _testEof
+ _testEof279:
+ m.cs = 279
+ goto _testEof
+ _testEof280:
+ m.cs = 280
+ goto _testEof
+ _testEof281:
+ m.cs = 281
+ goto _testEof
+ _testEof282:
+ m.cs = 282
+ goto _testEof
+ _testEof283:
+ m.cs = 283
+ goto _testEof
+ _testEof284:
+ m.cs = 284
+ goto _testEof
+ _testEof285:
+ m.cs = 285
+ goto _testEof
+ _testEof286:
+ m.cs = 286
+ goto _testEof
+ _testEof287:
+ m.cs = 287
+ goto _testEof
+ _testEof288:
+ m.cs = 288
+ goto _testEof
+ _testEof289:
+ m.cs = 289
+ goto _testEof
+ _testEof290:
+ m.cs = 290
+ goto _testEof
+ _testEof291:
+ m.cs = 291
+ goto _testEof
+ _testEof292:
+ m.cs = 292
+ goto _testEof
+ _testEof293:
+ m.cs = 293
+ goto _testEof
+ _testEof294:
+ m.cs = 294
+ goto _testEof
+ _testEof295:
+ m.cs = 295
+ goto _testEof
+ _testEof296:
+ m.cs = 296
+ goto _testEof
+ _testEof297:
+ m.cs = 297
+ goto _testEof
+ _testEof298:
+ m.cs = 298
+ goto _testEof
+ _testEof299:
+ m.cs = 299
+ goto _testEof
+ _testEof300:
+ m.cs = 300
+ goto _testEof
+ _testEof301:
+ m.cs = 301
+ goto _testEof
+ _testEof302:
+ m.cs = 302
+ goto _testEof
+ _testEof303:
+ m.cs = 303
+ goto _testEof
+ _testEof304:
+ m.cs = 304
+ goto _testEof
+ _testEof305:
+ m.cs = 305
+ goto _testEof
+ _testEof306:
+ m.cs = 306
+ goto _testEof
+ _testEof307:
+ m.cs = 307
+ goto _testEof
+ _testEof308:
+ m.cs = 308
+ goto _testEof
+ _testEof309:
+ m.cs = 309
+ goto _testEof
+ _testEof310:
+ m.cs = 310
+ goto _testEof
+ _testEof311:
+ m.cs = 311
+ goto _testEof
+ _testEof312:
+ m.cs = 312
+ goto _testEof
+ _testEof313:
+ m.cs = 313
+ goto _testEof
+ _testEof314:
+ m.cs = 314
+ goto _testEof
+ _testEof315:
+ m.cs = 315
+ goto _testEof
+ _testEof316:
+ m.cs = 316
+ goto _testEof
+ _testEof317:
+ m.cs = 317
+ goto _testEof
+ _testEof318:
+ m.cs = 318
+ goto _testEof
+ _testEof319:
+ m.cs = 319
+ goto _testEof
+ _testEof320:
+ m.cs = 320
+ goto _testEof
+ _testEof321:
+ m.cs = 321
+ goto _testEof
+ _testEof322:
+ m.cs = 322
+ goto _testEof
+ _testEof323:
+ m.cs = 323
+ goto _testEof
+ _testEof324:
+ m.cs = 324
+ goto _testEof
+ _testEof325:
+ m.cs = 325
+ goto _testEof
+ _testEof326:
+ m.cs = 326
+ goto _testEof
+ _testEof327:
+ m.cs = 327
+ goto _testEof
+ _testEof328:
+ m.cs = 328
+ goto _testEof
+ _testEof329:
+ m.cs = 329
+ goto _testEof
+ _testEof330:
+ m.cs = 330
+ goto _testEof
+ _testEof331:
+ m.cs = 331
+ goto _testEof
+ _testEof332:
+ m.cs = 332
+ goto _testEof
+ _testEof333:
+ m.cs = 333
+ goto _testEof
+ _testEof334:
+ m.cs = 334
+ goto _testEof
+ _testEof335:
+ m.cs = 335
+ goto _testEof
+ _testEof336:
+ m.cs = 336
+ goto _testEof
+ _testEof337:
+ m.cs = 337
+ goto _testEof
+ _testEof338:
+ m.cs = 338
+ goto _testEof
+ _testEof339:
+ m.cs = 339
+ goto _testEof
+ _testEof340:
+ m.cs = 340
+ goto _testEof
+ _testEof341:
+ m.cs = 341
+ goto _testEof
+ _testEof342:
+ m.cs = 342
+ goto _testEof
+ _testEof343:
+ m.cs = 343
+ goto _testEof
+ _testEof344:
+ m.cs = 344
+ goto _testEof
+ _testEof345:
+ m.cs = 345
+ goto _testEof
+ _testEof346:
+ m.cs = 346
+ goto _testEof
+ _testEof347:
+ m.cs = 347
+ goto _testEof
+ _testEof348:
+ m.cs = 348
+ goto _testEof
+ _testEof349:
+ m.cs = 349
+ goto _testEof
+ _testEof350:
+ m.cs = 350
+ goto _testEof
+ _testEof351:
+ m.cs = 351
+ goto _testEof
+ _testEof352:
+ m.cs = 352
+ goto _testEof
+ _testEof353:
+ m.cs = 353
+ goto _testEof
+ _testEof354:
+ m.cs = 354
+ goto _testEof
+ _testEof355:
+ m.cs = 355
+ goto _testEof
+ _testEof356:
+ m.cs = 356
+ goto _testEof
+ _testEof357:
+ m.cs = 357
+ goto _testEof
+ _testEof358:
+ m.cs = 358
+ goto _testEof
+ _testEof359:
+ m.cs = 359
+ goto _testEof
+ _testEof360:
+ m.cs = 360
+ goto _testEof
+ _testEof361:
+ m.cs = 361
+ goto _testEof
+ _testEof362:
+ m.cs = 362
+ goto _testEof
+ _testEof363:
+ m.cs = 363
+ goto _testEof
+ _testEof364:
+ m.cs = 364
+ goto _testEof
+ _testEof365:
+ m.cs = 365
+ goto _testEof
+ _testEof366:
+ m.cs = 366
+ goto _testEof
+ _testEof367:
+ m.cs = 367
+ goto _testEof
+ _testEof368:
+ m.cs = 368
+ goto _testEof
+ _testEof369:
+ m.cs = 369
+ goto _testEof
+ _testEof370:
+ m.cs = 370
+ goto _testEof
+ _testEof371:
+ m.cs = 371
+ goto _testEof
+ _testEof372:
+ m.cs = 372
+ goto _testEof
+ _testEof373:
+ m.cs = 373
+ goto _testEof
+ _testEof374:
+ m.cs = 374
+ goto _testEof
+ _testEof375:
+ m.cs = 375
+ goto _testEof
+ _testEof376:
+ m.cs = 376
+ goto _testEof
+ _testEof377:
+ m.cs = 377
+ goto _testEof
+ _testEof378:
+ m.cs = 378
+ goto _testEof
+ _testEof379:
+ m.cs = 379
+ goto _testEof
+ _testEof380:
+ m.cs = 380
+ goto _testEof
+ _testEof381:
+ m.cs = 381
+ goto _testEof
+ _testEof382:
+ m.cs = 382
+ goto _testEof
+ _testEof383:
+ m.cs = 383
+ goto _testEof
+ _testEof384:
+ m.cs = 384
+ goto _testEof
+ _testEof385:
+ m.cs = 385
+ goto _testEof
+ _testEof386:
+ m.cs = 386
+ goto _testEof
+ _testEof387:
+ m.cs = 387
+ goto _testEof
+ _testEof388:
+ m.cs = 388
+ goto _testEof
+ _testEof389:
+ m.cs = 389
+ goto _testEof
+ _testEof390:
+ m.cs = 390
+ goto _testEof
+ _testEof391:
+ m.cs = 391
+ goto _testEof
+ _testEof392:
+ m.cs = 392
+ goto _testEof
+ _testEof393:
+ m.cs = 393
+ goto _testEof
+ _testEof394:
+ m.cs = 394
+ goto _testEof
+ _testEof395:
+ m.cs = 395
+ goto _testEof
+ _testEof396:
+ m.cs = 396
+ goto _testEof
+ _testEof397:
+ m.cs = 397
+ goto _testEof
+ _testEof398:
+ m.cs = 398
+ goto _testEof
+ _testEof399:
+ m.cs = 399
+ goto _testEof
+ _testEof400:
+ m.cs = 400
+ goto _testEof
+ _testEof401:
+ m.cs = 401
+ goto _testEof
+ _testEof402:
+ m.cs = 402
+ goto _testEof
+ _testEof403:
+ m.cs = 403
+ goto _testEof
+ _testEof404:
+ m.cs = 404
+ goto _testEof
+ _testEof405:
+ m.cs = 405
+ goto _testEof
+ _testEof406:
+ m.cs = 406
+ goto _testEof
+ _testEof407:
+ m.cs = 407
+ goto _testEof
+ _testEof408:
+ m.cs = 408
+ goto _testEof
+ _testEof409:
+ m.cs = 409
+ goto _testEof
+ _testEof410:
+ m.cs = 410
+ goto _testEof
+ _testEof411:
+ m.cs = 411
+ goto _testEof
+ _testEof412:
+ m.cs = 412
+ goto _testEof
+ _testEof413:
+ m.cs = 413
+ goto _testEof
+ _testEof414:
+ m.cs = 414
+ goto _testEof
+ _testEof415:
+ m.cs = 415
+ goto _testEof
+ _testEof416:
+ m.cs = 416
+ goto _testEof
+ _testEof417:
+ m.cs = 417
+ goto _testEof
+ _testEof418:
+ m.cs = 418
+ goto _testEof
+ _testEof419:
+ m.cs = 419
+ goto _testEof
+ _testEof420:
+ m.cs = 420
+ goto _testEof
+ _testEof421:
+ m.cs = 421
+ goto _testEof
+ _testEof422:
+ m.cs = 422
+ goto _testEof
+ _testEof423:
+ m.cs = 423
+ goto _testEof
+ _testEof424:
+ m.cs = 424
+ goto _testEof
+ _testEof425:
+ m.cs = 425
+ goto _testEof
+ _testEof426:
+ m.cs = 426
+ goto _testEof
+ _testEof427:
+ m.cs = 427
+ goto _testEof
+ _testEof428:
+ m.cs = 428
+ goto _testEof
+ _testEof429:
+ m.cs = 429
+ goto _testEof
+ _testEof430:
+ m.cs = 430
+ goto _testEof
+ _testEof431:
+ m.cs = 431
+ goto _testEof
+ _testEof432:
+ m.cs = 432
+ goto _testEof
+ _testEof433:
+ m.cs = 433
+ goto _testEof
+ _testEof434:
+ m.cs = 434
+ goto _testEof
+ _testEof435:
+ m.cs = 435
+ goto _testEof
+ _testEof436:
+ m.cs = 436
+ goto _testEof
+ _testEof437:
+ m.cs = 437
+ goto _testEof
+ _testEof438:
+ m.cs = 438
+ goto _testEof
+ _testEof439:
+ m.cs = 439
+ goto _testEof
+ _testEof440:
+ m.cs = 440
+ goto _testEof
+ _testEof441:
+ m.cs = 441
+ goto _testEof
+ _testEof442:
+ m.cs = 442
+ goto _testEof
+ _testEof443:
+ m.cs = 443
+ goto _testEof
+ _testEof444:
+ m.cs = 444
+ goto _testEof
+ _testEof445:
+ m.cs = 445
+ goto _testEof
+ _testEof446:
+ m.cs = 446
+ goto _testEof
+ _testEof447:
+ m.cs = 447
+ goto _testEof
+ _testEof448:
+ m.cs = 448
+ goto _testEof
+ _testEof449:
+ m.cs = 449
+ goto _testEof
+ _testEof450:
+ m.cs = 450
+ goto _testEof
+ _testEof451:
+ m.cs = 451
+ goto _testEof
+ _testEof452:
+ m.cs = 452
+ goto _testEof
+ _testEof453:
+ m.cs = 453
+ goto _testEof
+ _testEof454:
+ m.cs = 454
+ goto _testEof
+ _testEof455:
+ m.cs = 455
+ goto _testEof
+ _testEof456:
+ m.cs = 456
+ goto _testEof
+ _testEof457:
+ m.cs = 457
+ goto _testEof
+ _testEof458:
+ m.cs = 458
+ goto _testEof
+ _testEof459:
+ m.cs = 459
+ goto _testEof
+ _testEof460:
+ m.cs = 460
+ goto _testEof
+ _testEof461:
+ m.cs = 461
+ goto _testEof
+ _testEof462:
+ m.cs = 462
+ goto _testEof
+ _testEof463:
+ m.cs = 463
+ goto _testEof
+ _testEof464:
+ m.cs = 464
+ goto _testEof
+ _testEof465:
+ m.cs = 465
+ goto _testEof
+ _testEof466:
+ m.cs = 466
+ goto _testEof
+ _testEof467:
+ m.cs = 467
+ goto _testEof
+ _testEof468:
+ m.cs = 468
+ goto _testEof
+ _testEof469:
+ m.cs = 469
+ goto _testEof
+ _testEof470:
+ m.cs = 470
+ goto _testEof
+ _testEof471:
+ m.cs = 471
+ goto _testEof
+ _testEof472:
+ m.cs = 472
+ goto _testEof
+ _testEof473:
+ m.cs = 473
+ goto _testEof
+ _testEof474:
+ m.cs = 474
+ goto _testEof
+ _testEof475:
+ m.cs = 475
+ goto _testEof
+ _testEof476:
+ m.cs = 476
+ goto _testEof
+ _testEof477:
+ m.cs = 477
+ goto _testEof
+ _testEof478:
+ m.cs = 478
+ goto _testEof
+ _testEof479:
+ m.cs = 479
+ goto _testEof
+ _testEof480:
+ m.cs = 480
+ goto _testEof
+ _testEof481:
+ m.cs = 481
+ goto _testEof
+ _testEof482:
+ m.cs = 482
+ goto _testEof
+ _testEof483:
+ m.cs = 483
+ goto _testEof
+ _testEof484:
+ m.cs = 484
+ goto _testEof
+ _testEof485:
+ m.cs = 485
+ goto _testEof
+ _testEof486:
+ m.cs = 486
+ goto _testEof
+ _testEof487:
+ m.cs = 487
+ goto _testEof
+ _testEof488:
+ m.cs = 488
+ goto _testEof
+ _testEof489:
+ m.cs = 489
+ goto _testEof
+ _testEof490:
+ m.cs = 490
+ goto _testEof
+ _testEof491:
+ m.cs = 491
+ goto _testEof
+ _testEof492:
+ m.cs = 492
+ goto _testEof
+ _testEof493:
+ m.cs = 493
+ goto _testEof
+ _testEof494:
+ m.cs = 494
+ goto _testEof
+ _testEof495:
+ m.cs = 495
+ goto _testEof
+ _testEof496:
+ m.cs = 496
+ goto _testEof
+ _testEof497:
+ m.cs = 497
+ goto _testEof
+ _testEof498:
+ m.cs = 498
+ goto _testEof
+ _testEof499:
+ m.cs = 499
+ goto _testEof
+ _testEof500:
+ m.cs = 500
+ goto _testEof
+ _testEof501:
+ m.cs = 501
+ goto _testEof
+ _testEof502:
+ m.cs = 502
+ goto _testEof
+ _testEof503:
+ m.cs = 503
+ goto _testEof
+ _testEof504:
+ m.cs = 504
+ goto _testEof
+ _testEof505:
+ m.cs = 505
+ goto _testEof
+ _testEof506:
+ m.cs = 506
+ goto _testEof
+ _testEof507:
+ m.cs = 507
+ goto _testEof
+ _testEof508:
+ m.cs = 508
+ goto _testEof
+ _testEof509:
+ m.cs = 509
+ goto _testEof
+ _testEof510:
+ m.cs = 510
+ goto _testEof
+ _testEof511:
+ m.cs = 511
+ goto _testEof
+ _testEof512:
+ m.cs = 512
+ goto _testEof
+ _testEof513:
+ m.cs = 513
+ goto _testEof
+ _testEof514:
+ m.cs = 514
+ goto _testEof
+ _testEof515:
+ m.cs = 515
+ goto _testEof
+ _testEof516:
+ m.cs = 516
+ goto _testEof
+ _testEof517:
+ m.cs = 517
+ goto _testEof
+ _testEof518:
+ m.cs = 518
+ goto _testEof
+ _testEof519:
+ m.cs = 519
+ goto _testEof
+ _testEof520:
+ m.cs = 520
+ goto _testEof
+ _testEof521:
+ m.cs = 521
+ goto _testEof
+ _testEof522:
+ m.cs = 522
+ goto _testEof
+ _testEof523:
+ m.cs = 523
+ goto _testEof
+ _testEof524:
+ m.cs = 524
+ goto _testEof
+ _testEof525:
+ m.cs = 525
+ goto _testEof
+ _testEof526:
+ m.cs = 526
+ goto _testEof
+ _testEof527:
+ m.cs = 527
+ goto _testEof
+ _testEof528:
+ m.cs = 528
+ goto _testEof
+ _testEof529:
+ m.cs = 529
+ goto _testEof
+ _testEof530:
+ m.cs = 530
+ goto _testEof
+ _testEof531:
+ m.cs = 531
+ goto _testEof
+ _testEof532:
+ m.cs = 532
+ goto _testEof
+ _testEof533:
+ m.cs = 533
+ goto _testEof
+ _testEof534:
+ m.cs = 534
+ goto _testEof
+ _testEof535:
+ m.cs = 535
+ goto _testEof
+ _testEof536:
+ m.cs = 536
+ goto _testEof
+ _testEof537:
+ m.cs = 537
+ goto _testEof
+ _testEof538:
+ m.cs = 538
+ goto _testEof
+ _testEof539:
+ m.cs = 539
+ goto _testEof
+ _testEof540:
+ m.cs = 540
+ goto _testEof
+ _testEof541:
+ m.cs = 541
+ goto _testEof
+ _testEof542:
+ m.cs = 542
+ goto _testEof
+ _testEof543:
+ m.cs = 543
+ goto _testEof
+ _testEof544:
+ m.cs = 544
+ goto _testEof
+ _testEof545:
+ m.cs = 545
+ goto _testEof
+ _testEof546:
+ m.cs = 546
+ goto _testEof
+ _testEof547:
+ m.cs = 547
+ goto _testEof
+ _testEof548:
+ m.cs = 548
+ goto _testEof
+ _testEof549:
+ m.cs = 549
+ goto _testEof
+ _testEof550:
+ m.cs = 550
+ goto _testEof
+ _testEof551:
+ m.cs = 551
+ goto _testEof
+ _testEof552:
+ m.cs = 552
+ goto _testEof
+ _testEof553:
+ m.cs = 553
+ goto _testEof
+ _testEof554:
+ m.cs = 554
+ goto _testEof
+ _testEof555:
+ m.cs = 555
+ goto _testEof
+ _testEof556:
+ m.cs = 556
+ goto _testEof
+ _testEof557:
+ m.cs = 557
+ goto _testEof
+ _testEof558:
+ m.cs = 558
+ goto _testEof
+ _testEof559:
+ m.cs = 559
+ goto _testEof
+ _testEof560:
+ m.cs = 560
+ goto _testEof
+ _testEof561:
+ m.cs = 561
+ goto _testEof
+ _testEof562:
+ m.cs = 562
+ goto _testEof
+ _testEof563:
+ m.cs = 563
+ goto _testEof
+ _testEof564:
+ m.cs = 564
+ goto _testEof
+ _testEof565:
+ m.cs = 565
+ goto _testEof
+ _testEof566:
+ m.cs = 566
+ goto _testEof
+ _testEof567:
+ m.cs = 567
+ goto _testEof
+ _testEof568:
+ m.cs = 568
+ goto _testEof
+ _testEof569:
+ m.cs = 569
+ goto _testEof
+ _testEof570:
+ m.cs = 570
+ goto _testEof
+ _testEof571:
+ m.cs = 571
+ goto _testEof
+ _testEof572:
+ m.cs = 572
+ goto _testEof
+ _testEof573:
+ m.cs = 573
+ goto _testEof
+ _testEof574:
+ m.cs = 574
+ goto _testEof
+ _testEof575:
+ m.cs = 575
+ goto _testEof
+ _testEof576:
+ m.cs = 576
+ goto _testEof
+ _testEof577:
+ m.cs = 577
+ goto _testEof
+ _testEof578:
+ m.cs = 578
+ goto _testEof
+ _testEof579:
+ m.cs = 579
+ goto _testEof
+ _testEof580:
+ m.cs = 580
+ goto _testEof
+ _testEof581:
+ m.cs = 581
+ goto _testEof
+ _testEof582:
+ m.cs = 582
+ goto _testEof
+ _testEof583:
+ m.cs = 583
+ goto _testEof
+ _testEof584:
+ m.cs = 584
+ goto _testEof
+ _testEof585:
+ m.cs = 585
+ goto _testEof
+ _testEof586:
+ m.cs = 586
+ goto _testEof
+ _testEof587:
+ m.cs = 587
+ goto _testEof
+ _testEof588:
+ m.cs = 588
+ goto _testEof
+ _testEof589:
+ m.cs = 589
+ goto _testEof
+ _testEof590:
+ m.cs = 590
+ goto _testEof
+ _testEof591:
+ m.cs = 591
+ goto _testEof
+ _testEof592:
+ m.cs = 592
+ goto _testEof
+ _testEof593:
+ m.cs = 593
+ goto _testEof
+ _testEof594:
+ m.cs = 594
+ goto _testEof
+ _testEof595:
+ m.cs = 595
+ goto _testEof
+ _testEof596:
+ m.cs = 596
+ goto _testEof
+ _testEof597:
+ m.cs = 597
+ goto _testEof
+ _testEof598:
+ m.cs = 598
+ goto _testEof
+ _testEof599:
+ m.cs = 599
+ goto _testEof
+ _testEof600:
+ m.cs = 600
+ goto _testEof
+ _testEof601:
+ m.cs = 601
+ goto _testEof
+ _testEof602:
+ m.cs = 602
+ goto _testEof
+ _testEof603:
+ m.cs = 603
+ goto _testEof
+ _testEof604:
+ m.cs = 604
+ goto _testEof
+ _testEof605:
+ m.cs = 605
+ goto _testEof
+ _testEof606:
+ m.cs = 606
+ goto _testEof
+ _testEof607:
+ m.cs = 607
+ goto _testEof
+ _testEof608:
+ m.cs = 608
+ goto _testEof
+ _testEof609:
+ m.cs = 609
+ goto _testEof
+ _testEof610:
+ m.cs = 610
+ goto _testEof
+ _testEof611:
+ m.cs = 611
+ goto _testEof
+ _testEof612:
+ m.cs = 612
+ goto _testEof
+ _testEof613:
+ m.cs = 613
+ goto _testEof
+ _testEof614:
+ m.cs = 614
+ goto _testEof
+ _testEof615:
+ m.cs = 615
+ goto _testEof
+ _testEof616:
+ m.cs = 616
+ goto _testEof
+ _testEof617:
+ m.cs = 617
+ goto _testEof
+ _testEof618:
+ m.cs = 618
+ goto _testEof
+ _testEof619:
+ m.cs = 619
+ goto _testEof
+ _testEof620:
+ m.cs = 620
+ goto _testEof
+ _testEof621:
+ m.cs = 621
+ goto _testEof
+ _testEof622:
+ m.cs = 622
+ goto _testEof
+ _testEof623:
+ m.cs = 623
+ goto _testEof
+ _testEof624:
+ m.cs = 624
+ goto _testEof
+ _testEof625:
+ m.cs = 625
+ goto _testEof
+ _testEof626:
+ m.cs = 626
+ goto _testEof
+ _testEof627:
+ m.cs = 627
+ goto _testEof
+ _testEof628:
+ m.cs = 628
+ goto _testEof
+ _testEof629:
+ m.cs = 629
+ goto _testEof
+ _testEof630:
+ m.cs = 630
+ goto _testEof
+ _testEof631:
+ m.cs = 631
+ goto _testEof
+ _testEof632:
+ m.cs = 632
+ goto _testEof
+ _testEof633:
+ m.cs = 633
+ goto _testEof
+ _testEof634:
+ m.cs = 634
+ goto _testEof
+ _testEof635:
+ m.cs = 635
+ goto _testEof
+ _testEof636:
+ m.cs = 636
+ goto _testEof
+ _testEof637:
+ m.cs = 637
+ goto _testEof
+ _testEof638:
+ m.cs = 638
+ goto _testEof
+ _testEof639:
+ m.cs = 639
+ goto _testEof
+ _testEof640:
+ m.cs = 640
+ goto _testEof
+ _testEof641:
+ m.cs = 641
+ goto _testEof
+ _testEof642:
+ m.cs = 642
+ goto _testEof
+ _testEof643:
+ m.cs = 643
+ goto _testEof
+ _testEof644:
+ m.cs = 644
+ goto _testEof
+ _testEof645:
+ m.cs = 645
+ goto _testEof
+ _testEof646:
+ m.cs = 646
+ goto _testEof
+ _testEof647:
+ m.cs = 647
+ goto _testEof
+ _testEof648:
+ m.cs = 648
+ goto _testEof
+ _testEof649:
+ m.cs = 649
+ goto _testEof
+ _testEof650:
+ m.cs = 650
+ goto _testEof
+ _testEof651:
+ m.cs = 651
+ goto _testEof
+ _testEof652:
+ m.cs = 652
+ goto _testEof
+ _testEof653:
+ m.cs = 653
+ goto _testEof
+ _testEof654:
+ m.cs = 654
+ goto _testEof
+ _testEof655:
+ m.cs = 655
+ goto _testEof
+ _testEof656:
+ m.cs = 656
+ goto _testEof
+ _testEof657:
+ m.cs = 657
+ goto _testEof
+ _testEof658:
+ m.cs = 658
+ goto _testEof
+ _testEof659:
+ m.cs = 659
+ goto _testEof
+ _testEof660:
+ m.cs = 660
+ goto _testEof
+ _testEof661:
+ m.cs = 661
+ goto _testEof
+ _testEof662:
+ m.cs = 662
+ goto _testEof
+ _testEof663:
+ m.cs = 663
+ goto _testEof
+ _testEof664:
+ m.cs = 664
+ goto _testEof
+ _testEof665:
+ m.cs = 665
+ goto _testEof
+ _testEof666:
+ m.cs = 666
+ goto _testEof
+ _testEof667:
+ m.cs = 667
+ goto _testEof
+ _testEof668:
+ m.cs = 668
+ goto _testEof
+ _testEof669:
+ m.cs = 669
+ goto _testEof
+ _testEof670:
+ m.cs = 670
+ goto _testEof
+ _testEof671:
+ m.cs = 671
+ goto _testEof
+ _testEof672:
+ m.cs = 672
+ goto _testEof
+ _testEof673:
+ m.cs = 673
+ goto _testEof
+ _testEof674:
+ m.cs = 674
+ goto _testEof
+ _testEof675:
+ m.cs = 675
+ goto _testEof
+ _testEof676:
+ m.cs = 676
+ goto _testEof
+ _testEof677:
+ m.cs = 677
+ goto _testEof
+ _testEof678:
+ m.cs = 678
+ goto _testEof
+ _testEof679:
+ m.cs = 679
+ goto _testEof
+ _testEof680:
+ m.cs = 680
+ goto _testEof
+ _testEof681:
+ m.cs = 681
+ goto _testEof
+ _testEof682:
+ m.cs = 682
+ goto _testEof
+ _testEof683:
+ m.cs = 683
+ goto _testEof
+ _testEof684:
+ m.cs = 684
+ goto _testEof
+ _testEof685:
+ m.cs = 685
+ goto _testEof
+ _testEof686:
+ m.cs = 686
+ goto _testEof
+ _testEof687:
+ m.cs = 687
+ goto _testEof
+ _testEof688:
+ m.cs = 688
+ goto _testEof
+ _testEof689:
+ m.cs = 689
+ goto _testEof
+ _testEof690:
+ m.cs = 690
+ goto _testEof
+ _testEof691:
+ m.cs = 691
+ goto _testEof
+ _testEof692:
+ m.cs = 692
+ goto _testEof
+ _testEof693:
+ m.cs = 693
+ goto _testEof
+ _testEof694:
+ m.cs = 694
+ goto _testEof
+ _testEof695:
+ m.cs = 695
+ goto _testEof
+ _testEof696:
+ m.cs = 696
+ goto _testEof
+ _testEof697:
+ m.cs = 697
+ goto _testEof
+ _testEof698:
+ m.cs = 698
+ goto _testEof
+ _testEof699:
+ m.cs = 699
+ goto _testEof
+ _testEof700:
+ m.cs = 700
+ goto _testEof
+ _testEof701:
+ m.cs = 701
+ goto _testEof
+ _testEof702:
+ m.cs = 702
+ goto _testEof
+ _testEof703:
+ m.cs = 703
+ goto _testEof
+ _testEof704:
+ m.cs = 704
+ goto _testEof
+ _testEof705:
+ m.cs = 705
+ goto _testEof
+ _testEof706:
+ m.cs = 706
+ goto _testEof
+ _testEof707:
+ m.cs = 707
+ goto _testEof
+ _testEof708:
+ m.cs = 708
+ goto _testEof
+ _testEof709:
+ m.cs = 709
+ goto _testEof
+ _testEof710:
+ m.cs = 710
+ goto _testEof
+ _testEof711:
+ m.cs = 711
+ goto _testEof
+ _testEof712:
+ m.cs = 712
+ goto _testEof
+ _testEof713:
+ m.cs = 713
+ goto _testEof
+ _testEof714:
+ m.cs = 714
+ goto _testEof
+ _testEof715:
+ m.cs = 715
+ goto _testEof
+ _testEof716:
+ m.cs = 716
+ goto _testEof
+ _testEof717:
+ m.cs = 717
+ goto _testEof
+ _testEof718:
+ m.cs = 718
+ goto _testEof
+ _testEof719:
+ m.cs = 719
+ goto _testEof
+ _testEof720:
+ m.cs = 720
+ goto _testEof
+ _testEof721:
+ m.cs = 721
+ goto _testEof
+ _testEof722:
+ m.cs = 722
+ goto _testEof
+ _testEof723:
+ m.cs = 723
+ goto _testEof
+ _testEof724:
+ m.cs = 724
+ goto _testEof
+ _testEof725:
+ m.cs = 725
+ goto _testEof
+ _testEof726:
+ m.cs = 726
+ goto _testEof
+ _testEof727:
+ m.cs = 727
+ goto _testEof
+ _testEof728:
+ m.cs = 728
+ goto _testEof
+ _testEof729:
+ m.cs = 729
+ goto _testEof
+ _testEof730:
+ m.cs = 730
+ goto _testEof
+ _testEof731:
+ m.cs = 731
+ goto _testEof
+ _testEof732:
+ m.cs = 732
+ goto _testEof
+ _testEof733:
+ m.cs = 733
+ goto _testEof
+ _testEof734:
+ m.cs = 734
+ goto _testEof
+ _testEof735:
+ m.cs = 735
+ goto _testEof
+ _testEof736:
+ m.cs = 736
+ goto _testEof
+ _testEof737:
+ m.cs = 737
+ goto _testEof
+ _testEof738:
+ m.cs = 738
+ goto _testEof
+ _testEof739:
+ m.cs = 739
+ goto _testEof
+ _testEof740:
+ m.cs = 740
+ goto _testEof
+ _testEof741:
+ m.cs = 741
+ goto _testEof
+ _testEof742:
+ m.cs = 742
+ goto _testEof
+ _testEof743:
+ m.cs = 743
+ goto _testEof
+ _testEof744:
+ m.cs = 744
+ goto _testEof
+ _testEof745:
+ m.cs = 745
+ goto _testEof
+ _testEof746:
+ m.cs = 746
+ goto _testEof
+ _testEof747:
+ m.cs = 747
+ goto _testEof
+ _testEof748:
+ m.cs = 748
+ goto _testEof
+ _testEof749:
+ m.cs = 749
+ goto _testEof
+ _testEof750:
+ m.cs = 750
+ goto _testEof
+ _testEof751:
+ m.cs = 751
+ goto _testEof
+ _testEof752:
+ m.cs = 752
+ goto _testEof
+ _testEof753:
+ m.cs = 753
+ goto _testEof
+ _testEof754:
+ m.cs = 754
+ goto _testEof
+ _testEof755:
+ m.cs = 755
+ goto _testEof
+ _testEof756:
+ m.cs = 756
+ goto _testEof
+ _testEof757:
+ m.cs = 757
+ goto _testEof
+ _testEof758:
+ m.cs = 758
+ goto _testEof
+ _testEof759:
+ m.cs = 759
+ goto _testEof
+ _testEof760:
+ m.cs = 760
+ goto _testEof
+ _testEof761:
+ m.cs = 761
+ goto _testEof
+ _testEof762:
+ m.cs = 762
+ goto _testEof
+ _testEof763:
+ m.cs = 763
+ goto _testEof
+ _testEof764:
+ m.cs = 764
+ goto _testEof
+ _testEof765:
+ m.cs = 765
+ goto _testEof
+ _testEof766:
+ m.cs = 766
+ goto _testEof
+ _testEof767:
+ m.cs = 767
+ goto _testEof
+ _testEof768:
+ m.cs = 768
+ goto _testEof
+ _testEof769:
+ m.cs = 769
+ goto _testEof
+ _testEof770:
+ m.cs = 770
+ goto _testEof
+ _testEof771:
+ m.cs = 771
+ goto _testEof
+ _testEof772:
+ m.cs = 772
+ goto _testEof
+ _testEof773:
+ m.cs = 773
+ goto _testEof
+ _testEof774:
+ m.cs = 774
+ goto _testEof
+ _testEof775:
+ m.cs = 775
+ goto _testEof
+ _testEof776:
+ m.cs = 776
+ goto _testEof
+ _testEof777:
+ m.cs = 777
+ goto _testEof
+ _testEof778:
+ m.cs = 778
+ goto _testEof
+ _testEof779:
+ m.cs = 779
+ goto _testEof
+ _testEof780:
+ m.cs = 780
+ goto _testEof
+ _testEof781:
+ m.cs = 781
+ goto _testEof
+ _testEof782:
+ m.cs = 782
+ goto _testEof
+ _testEof783:
+ m.cs = 783
+ goto _testEof
+ _testEof784:
+ m.cs = 784
+ goto _testEof
+ _testEof785:
+ m.cs = 785
+ goto _testEof
+ _testEof786:
+ m.cs = 786
+ goto _testEof
+ _testEof787:
+ m.cs = 787
+ goto _testEof
+ _testEof788:
+ m.cs = 788
+ goto _testEof
+ _testEof789:
+ m.cs = 789
+ goto _testEof
+ _testEof790:
+ m.cs = 790
+ goto _testEof
+ _testEof791:
+ m.cs = 791
+ goto _testEof
+ _testEof792:
+ m.cs = 792
+ goto _testEof
+ _testEof793:
+ m.cs = 793
+ goto _testEof
+ _testEof794:
+ m.cs = 794
+ goto _testEof
+ _testEof795:
+ m.cs = 795
+ goto _testEof
+ _testEof796:
+ m.cs = 796
+ goto _testEof
+ _testEof797:
+ m.cs = 797
+ goto _testEof
+ _testEof798:
+ m.cs = 798
+ goto _testEof
+ _testEof799:
+ m.cs = 799
+ goto _testEof
+ _testEof800:
+ m.cs = 800
+ goto _testEof
+ _testEof801:
+ m.cs = 801
+ goto _testEof
+ _testEof802:
+ m.cs = 802
+ goto _testEof
+ _testEof803:
+ m.cs = 803
+ goto _testEof
+ _testEof804:
+ m.cs = 804
+ goto _testEof
+ _testEof805:
+ m.cs = 805
+ goto _testEof
+ _testEof806:
+ m.cs = 806
+ goto _testEof
+ _testEof807:
+ m.cs = 807
+ goto _testEof
+ _testEof808:
+ m.cs = 808
+ goto _testEof
+ _testEof809:
+ m.cs = 809
+ goto _testEof
+ _testEof810:
+ m.cs = 810
+ goto _testEof
+ _testEof811:
+ m.cs = 811
+ goto _testEof
+ _testEof812:
+ m.cs = 812
+ goto _testEof
+ _testEof813:
+ m.cs = 813
+ goto _testEof
+ _testEof814:
+ m.cs = 814
+ goto _testEof
+ _testEof815:
+ m.cs = 815
+ goto _testEof
+ _testEof816:
+ m.cs = 816
+ goto _testEof
+ _testEof817:
+ m.cs = 817
+ goto _testEof
+ _testEof818:
+ m.cs = 818
+ goto _testEof
+ _testEof819:
+ m.cs = 819
+ goto _testEof
+ _testEof820:
+ m.cs = 820
+ goto _testEof
+ _testEof821:
+ m.cs = 821
+ goto _testEof
+ _testEof822:
+ m.cs = 822
+ goto _testEof
+ _testEof823:
+ m.cs = 823
+ goto _testEof
+ _testEof824:
+ m.cs = 824
+ goto _testEof
+ _testEof825:
+ m.cs = 825
+ goto _testEof
+ _testEof826:
+ m.cs = 826
+ goto _testEof
+ _testEof827:
+ m.cs = 827
+ goto _testEof
+ _testEof828:
+ m.cs = 828
+ goto _testEof
+ _testEof829:
+ m.cs = 829
+ goto _testEof
+ _testEof830:
+ m.cs = 830
+ goto _testEof
+ _testEof831:
+ m.cs = 831
+ goto _testEof
+ _testEof832:
+ m.cs = 832
+ goto _testEof
+ _testEof833:
+ m.cs = 833
+ goto _testEof
+ _testEof834:
+ m.cs = 834
+ goto _testEof
+ _testEof835:
+ m.cs = 835
+ goto _testEof
+ _testEof836:
+ m.cs = 836
+ goto _testEof
+ _testEof837:
+ m.cs = 837
+ goto _testEof
+ _testEof838:
+ m.cs = 838
+ goto _testEof
+ _testEof839:
+ m.cs = 839
+ goto _testEof
+ _testEof840:
+ m.cs = 840
+ goto _testEof
+ _testEof841:
+ m.cs = 841
+ goto _testEof
+ _testEof842:
+ m.cs = 842
+ goto _testEof
+ _testEof843:
+ m.cs = 843
+ goto _testEof
+ _testEof844:
+ m.cs = 844
+ goto _testEof
+ _testEof845:
+ m.cs = 845
+ goto _testEof
+ _testEof846:
+ m.cs = 846
+ goto _testEof
+ _testEof847:
+ m.cs = 847
+ goto _testEof
+ _testEof848:
+ m.cs = 848
+ goto _testEof
+ _testEof849:
+ m.cs = 849
+ goto _testEof
+ _testEof850:
+ m.cs = 850
+ goto _testEof
+ _testEof851:
+ m.cs = 851
+ goto _testEof
+ _testEof852:
+ m.cs = 852
+ goto _testEof
+ _testEof853:
+ m.cs = 853
+ goto _testEof
+ _testEof854:
+ m.cs = 854
+ goto _testEof
+ _testEof855:
+ m.cs = 855
+ goto _testEof
+ _testEof856:
+ m.cs = 856
+ goto _testEof
+ _testEof857:
+ m.cs = 857
+ goto _testEof
+ _testEof858:
+ m.cs = 858
+ goto _testEof
+ _testEof859:
+ m.cs = 859
+ goto _testEof
+ _testEof860:
+ m.cs = 860
+ goto _testEof
+ _testEof861:
+ m.cs = 861
+ goto _testEof
+ _testEof862:
+ m.cs = 862
+ goto _testEof
+ _testEof863:
+ m.cs = 863
+ goto _testEof
+ _testEof864:
+ m.cs = 864
+ goto _testEof
+ _testEof865:
+ m.cs = 865
+ goto _testEof
+ _testEof866:
+ m.cs = 866
+ goto _testEof
+ _testEof867:
+ m.cs = 867
+ goto _testEof
+ _testEof868:
+ m.cs = 868
+ goto _testEof
+ _testEof869:
+ m.cs = 869
+ goto _testEof
+ _testEof870:
+ m.cs = 870
+ goto _testEof
+ _testEof871:
+ m.cs = 871
+ goto _testEof
+ _testEof872:
+ m.cs = 872
+ goto _testEof
+ _testEof873:
+ m.cs = 873
+ goto _testEof
+ _testEof874:
+ m.cs = 874
+ goto _testEof
+ _testEof875:
+ m.cs = 875
+ goto _testEof
+ _testEof876:
+ m.cs = 876
+ goto _testEof
+ _testEof877:
+ m.cs = 877
+ goto _testEof
+ _testEof878:
+ m.cs = 878
+ goto _testEof
+ _testEof879:
+ m.cs = 879
+ goto _testEof
+ _testEof880:
+ m.cs = 880
+ goto _testEof
+ _testEof881:
+ m.cs = 881
+ goto _testEof
+ _testEof882:
+ m.cs = 882
+ goto _testEof
+ _testEof883:
+ m.cs = 883
+ goto _testEof
+ _testEof884:
+ m.cs = 884
+ goto _testEof
+ _testEof885:
+ m.cs = 885
+ goto _testEof
+ _testEof886:
+ m.cs = 886
+ goto _testEof
+ _testEof887:
+ m.cs = 887
+ goto _testEof
+ _testEof888:
+ m.cs = 888
+ goto _testEof
+ _testEof889:
+ m.cs = 889
+ goto _testEof
+ _testEof890:
+ m.cs = 890
+ goto _testEof
+ _testEof891:
+ m.cs = 891
+ goto _testEof
+ _testEof892:
+ m.cs = 892
+ goto _testEof
+ _testEof893:
+ m.cs = 893
+ goto _testEof
+ _testEof894:
+ m.cs = 894
+ goto _testEof
+ _testEof895:
+ m.cs = 895
+ goto _testEof
+ _testEof896:
+ m.cs = 896
+ goto _testEof
+ _testEof897:
+ m.cs = 897
+ goto _testEof
+ _testEof898:
+ m.cs = 898
+ goto _testEof
+ _testEof899:
+ m.cs = 899
+ goto _testEof
+ _testEof900:
+ m.cs = 900
+ goto _testEof
+ _testEof901:
+ m.cs = 901
+ goto _testEof
+ _testEof902:
+ m.cs = 902
+ goto _testEof
+ _testEof903:
+ m.cs = 903
+ goto _testEof
+ _testEof904:
+ m.cs = 904
+ goto _testEof
+ _testEof905:
+ m.cs = 905
+ goto _testEof
+ _testEof906:
+ m.cs = 906
+ goto _testEof
+ _testEof907:
+ m.cs = 907
+ goto _testEof
+ _testEof908:
+ m.cs = 908
+ goto _testEof
+ _testEof909:
+ m.cs = 909
+ goto _testEof
+ _testEof910:
+ m.cs = 910
+ goto _testEof
+ _testEof911:
+ m.cs = 911
+ goto _testEof
+ _testEof912:
+ m.cs = 912
+ goto _testEof
+ _testEof913:
+ m.cs = 913
+ goto _testEof
+ _testEof914:
+ m.cs = 914
+ goto _testEof
+ _testEof915:
+ m.cs = 915
+ goto _testEof
+ _testEof916:
+ m.cs = 916
+ goto _testEof
+ _testEof917:
+ m.cs = 917
+ goto _testEof
+ _testEof918:
+ m.cs = 918
+ goto _testEof
+ _testEof919:
+ m.cs = 919
+ goto _testEof
+ _testEof920:
+ m.cs = 920
+ goto _testEof
+ _testEof921:
+ m.cs = 921
+ goto _testEof
+ _testEof922:
+ m.cs = 922
+ goto _testEof
+ _testEof923:
+ m.cs = 923
+ goto _testEof
+ _testEof924:
+ m.cs = 924
+ goto _testEof
+ _testEof925:
+ m.cs = 925
+ goto _testEof
+ _testEof926:
+ m.cs = 926
+ goto _testEof
+ _testEof927:
+ m.cs = 927
+ goto _testEof
+ _testEof928:
+ m.cs = 928
+ goto _testEof
+ _testEof929:
+ m.cs = 929
+ goto _testEof
+ _testEof930:
+ m.cs = 930
+ goto _testEof
+ _testEof931:
+ m.cs = 931
+ goto _testEof
+ _testEof932:
+ m.cs = 932
+ goto _testEof
+ _testEof933:
+ m.cs = 933
+ goto _testEof
+ _testEof934:
+ m.cs = 934
+ goto _testEof
+ _testEof935:
+ m.cs = 935
+ goto _testEof
+ _testEof936:
+ m.cs = 936
+ goto _testEof
+ _testEof937:
+ m.cs = 937
+ goto _testEof
+ _testEof938:
+ m.cs = 938
+ goto _testEof
+ _testEof939:
+ m.cs = 939
+ goto _testEof
+ _testEof940:
+ m.cs = 940
+ goto _testEof
+ _testEof941:
+ m.cs = 941
+ goto _testEof
+ _testEof942:
+ m.cs = 942
+ goto _testEof
+ _testEof943:
+ m.cs = 943
+ goto _testEof
+ _testEof944:
+ m.cs = 944
+ goto _testEof
+ _testEof945:
+ m.cs = 945
+ goto _testEof
+ _testEof946:
+ m.cs = 946
+ goto _testEof
+ _testEof947:
+ m.cs = 947
+ goto _testEof
+ _testEof948:
+ m.cs = 948
+ goto _testEof
+ _testEof949:
+ m.cs = 949
+ goto _testEof
+ _testEof950:
+ m.cs = 950
+ goto _testEof
+ _testEof951:
+ m.cs = 951
+ goto _testEof
+ _testEof952:
+ m.cs = 952
+ goto _testEof
+ _testEof953:
+ m.cs = 953
+ goto _testEof
+ _testEof954:
+ m.cs = 954
+ goto _testEof
+ _testEof955:
+ m.cs = 955
+ goto _testEof
+ _testEof956:
+ m.cs = 956
+ goto _testEof
+ _testEof957:
+ m.cs = 957
+ goto _testEof
+ _testEof958:
+ m.cs = 958
+ goto _testEof
+ _testEof959:
+ m.cs = 959
+ goto _testEof
+ _testEof960:
+ m.cs = 960
+ goto _testEof
+ _testEof961:
+ m.cs = 961
+ goto _testEof
+ _testEof962:
+ m.cs = 962
+ goto _testEof
+ _testEof963:
+ m.cs = 963
+ goto _testEof
+ _testEof964:
+ m.cs = 964
+ goto _testEof
+ _testEof965:
+ m.cs = 965
+ goto _testEof
+ _testEof966:
+ m.cs = 966
+ goto _testEof
+ _testEof967:
+ m.cs = 967
+ goto _testEof
+ _testEof968:
+ m.cs = 968
+ goto _testEof
+ _testEof969:
+ m.cs = 969
+ goto _testEof
+ _testEof970:
+ m.cs = 970
+ goto _testEof
+ _testEof971:
+ m.cs = 971
+ goto _testEof
+ _testEof972:
+ m.cs = 972
+ goto _testEof
+ _testEof973:
+ m.cs = 973
+ goto _testEof
+ _testEof974:
+ m.cs = 974
+ goto _testEof
+ _testEof975:
+ m.cs = 975
+ goto _testEof
+ _testEof976:
+ m.cs = 976
+ goto _testEof
+ _testEof977:
+ m.cs = 977
+ goto _testEof
+ _testEof978:
+ m.cs = 978
+ goto _testEof
+ _testEof979:
+ m.cs = 979
+ goto _testEof
+ _testEof980:
+ m.cs = 980
+ goto _testEof
+ _testEof981:
+ m.cs = 981
+ goto _testEof
+ _testEof982:
+ m.cs = 982
+ goto _testEof
+ _testEof983:
+ m.cs = 983
+ goto _testEof
+ _testEof984:
+ m.cs = 984
+ goto _testEof
+ _testEof985:
+ m.cs = 985
+ goto _testEof
+ _testEof986:
+ m.cs = 986
+ goto _testEof
+ _testEof21:
+ m.cs = 21
+ goto _testEof
+ _testEof22:
+ m.cs = 22
+ goto _testEof
+ _testEof23:
+ m.cs = 23
+ goto _testEof
+ _testEof24:
+ m.cs = 24
+ goto _testEof
+ _testEof25:
+ m.cs = 25
+ goto _testEof
+ _testEof26:
+ m.cs = 26
+ goto _testEof
+ _testEof27:
+ m.cs = 27
+ goto _testEof
+ _testEof28:
+ m.cs = 28
+ goto _testEof
+ _testEof29:
+ m.cs = 29
+ goto _testEof
+ _testEof30:
+ m.cs = 30
+ goto _testEof
+ _testEof31:
+ m.cs = 31
+ goto _testEof
+ _testEof32:
+ m.cs = 32
+ goto _testEof
+ _testEof33:
+ m.cs = 33
+ goto _testEof
+ _testEof34:
+ m.cs = 34
+ goto _testEof
+ _testEof35:
+ m.cs = 35
+ goto _testEof
+ _testEof36:
+ m.cs = 36
+ goto _testEof
+ _testEof37:
+ m.cs = 37
+ goto _testEof
+ _testEof38:
+ m.cs = 38
+ goto _testEof
+ _testEof39:
+ m.cs = 39
+ goto _testEof
+ _testEof40:
+ m.cs = 40
+ goto _testEof
+ _testEof41:
+ m.cs = 41
+ goto _testEof
+ _testEof42:
+ m.cs = 42
+ goto _testEof
+ _testEof43:
+ m.cs = 43
+ goto _testEof
+ _testEof44:
+ m.cs = 44
+ goto _testEof
+ _testEof45:
+ m.cs = 45
+ goto _testEof
+ _testEof46:
+ m.cs = 46
+ goto _testEof
+ _testEof47:
+ m.cs = 47
+ goto _testEof
+ _testEof48:
+ m.cs = 48
+ goto _testEof
+ _testEof49:
+ m.cs = 49
+ goto _testEof
+ _testEof50:
+ m.cs = 50
+ goto _testEof
+ _testEof51:
+ m.cs = 51
+ goto _testEof
+ _testEof52:
+ m.cs = 52
+ goto _testEof
+ _testEof53:
+ m.cs = 53
+ goto _testEof
+ _testEof54:
+ m.cs = 54
+ goto _testEof
+ _testEof55:
+ m.cs = 55
+ goto _testEof
+ _testEof56:
+ m.cs = 56
+ goto _testEof
+ _testEof57:
+ m.cs = 57
+ goto _testEof
+ _testEof58:
+ m.cs = 58
+ goto _testEof
+ _testEof59:
+ m.cs = 59
+ goto _testEof
+ _testEof60:
+ m.cs = 60
+ goto _testEof
+ _testEof61:
+ m.cs = 61
+ goto _testEof
+ _testEof62:
+ m.cs = 62
+ goto _testEof
+ _testEof63:
+ m.cs = 63
+ goto _testEof
+ _testEof64:
+ m.cs = 64
+ goto _testEof
+ _testEof65:
+ m.cs = 65
+ goto _testEof
+ _testEof66:
+ m.cs = 66
+ goto _testEof
+ _testEof67:
+ m.cs = 67
+ goto _testEof
+ _testEof68:
+ m.cs = 68
+ goto _testEof
+ _testEof69:
+ m.cs = 69
+ goto _testEof
+ _testEof70:
+ m.cs = 70
+ goto _testEof
+ _testEof71:
+ m.cs = 71
+ goto _testEof
+ _testEof72:
+ m.cs = 72
+ goto _testEof
+ _testEof987:
+ m.cs = 987
+ goto _testEof
+
+ _testEof:
+ {
+ }
+ if (m.p) == (m.eof) {
+ switch m.cs {
+ case 73, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986:
+
+ output.message = string(m.text())
+
+ case 1:
+
+ m.err = fmt.Errorf(errPri, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ case 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39:
+
+ m.err = fmt.Errorf(errTimestamp, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ case 58, 59, 60, 61, 62, 63, 65:
+
+ m.err = fmt.Errorf(errRFC3339, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ case 2, 3, 70, 71, 72:
+
+ m.err = fmt.Errorf(errPrival, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ m.err = fmt.Errorf(errPri, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ case 20:
+
+ m.err = fmt.Errorf(errHostname, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+
+ m.err = fmt.Errorf(errTag, m.p)
+ (m.p)--
+
+ {
+ goto st987
+ }
+ }
+ }
+
+ _out:
+ {
+ }
+ }
+
+ if m.cs < firstFinal || m.cs == enFail {
+ if m.bestEffort && output.minimal() {
+ // An error occurred but partial parsing is on and partial message is minimally valid
+ return output.export(), m.err
+ }
+ return nil, m.err
+ }
+
+ return output.export(), nil
+}
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/machine.go.rl b/vendor/github.com/leodido/go-syslog/v4/rfc3164/machine.go.rl
similarity index 87%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc3164/machine.go.rl
rename to vendor/github.com/leodido/go-syslog/v4/rfc3164/machine.go.rl
index 4d13817525..7e66ba23d1 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/machine.go.rl
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc3164/machine.go.rl
@@ -4,8 +4,8 @@ import (
"fmt"
"time"
- "github.com/influxdata/go-syslog/v3"
- "github.com/influxdata/go-syslog/v3/common"
+ "github.com/leodido/go-syslog/v4"
+ "github.com/leodido/go-syslog/v4/common"
)
var (
@@ -137,20 +137,21 @@ rfc3339 = fulldate >mark 'T' hhmmss timeoffset %set_rfc3339 @err(err_rfc3339);
# note > RFC 3164 says "The Domain Name MUST NOT be included in the HOSTNAME field"
# note > this could mean that the we may need to create and to use a labelrange = graph{1,63} here if we want the parser to be stricter.
-hostname = hostnamerange >mark %set_hostname $err(err_hostname);
+hostname = (hostnamerange -- ':') >mark %set_hostname $err(err_hostname);
# Section 4.1.3
# note > alnum{1,32} is too restrictive (eg., no dashes)
# note > see https://tools.ietf.org/html/rfc2234#section-2.1 for an interpretation of "ABNF alphanumeric" as stated by RFC 3164 regarding the tag
# note > while RFC3164 assumes only ABNF alphanumeric process names, many BSD-syslog contains processe names with additional characters (-, _, .)
-tag = (print -- [ :\[]){1,32} >mark %set_tag @err(err_tag);
+# note > should be {1,32} but Unifi thinks it can be up to 48 characters
+tag = (print -- [ :\[]){1,48} >mark %set_tag @err(err_tag);
visible = print | 0x80..0xFF;
# The first not alphanumeric character starts the content (usually containing a PID) part of the message part
-contentval = !alnum @err(err_contentstart) >mark print* %set_content @err(err_content);
+contentval = (print -- [ \[\]])* >mark %set_content @err(err_content);
-content = '[' contentval ']'; # todo(leodido) > support ':' and ' ' too. Also they have to match?
+content = '[' contentval ']' @err(err_contentstart); # todo(leodido) > support ':' and ' ' too. Also they have to match?
mex = visible+ >mark %set_message;
@@ -158,7 +159,9 @@ msg = (tag content? ':' sp)? mex;
fail := (any - [\n\r])* @err{ fgoto main; };
-main := pri (timestamp | (rfc3339 when { m.rfc3339 })) sp hostname sp msg;
+# note > some BSD syslog implementations insert extra spaces between "PRI", "Timestamp", and "Hostname": although these strictly violate RFC3164, it is useful to be able to parse them
+# note > OpenBSD like many other hardware sends syslog messages without hostname
+main := pri sp* (timestamp | (rfc3339 when { m.rfc3339 })) sp+ (hostname sp+)? msg '\n'?;
}%%
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/options.go b/vendor/github.com/leodido/go-syslog/v4/rfc3164/options.go
similarity index 98%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc3164/options.go
rename to vendor/github.com/leodido/go-syslog/v4/rfc3164/options.go
index 38175b9aaa..0c6c1a2bcd 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/options.go
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc3164/options.go
@@ -3,7 +3,7 @@ package rfc3164
import (
"time"
- syslog "github.com/influxdata/go-syslog/v3"
+ syslog "github.com/leodido/go-syslog/v4"
)
// WithBestEffort enables the best effort mode.
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/parser.go b/vendor/github.com/leodido/go-syslog/v4/rfc3164/parser.go
similarity index 95%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc3164/parser.go
rename to vendor/github.com/leodido/go-syslog/v4/rfc3164/parser.go
index f31b588dcb..1b0a4ad93c 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/parser.go
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc3164/parser.go
@@ -3,7 +3,7 @@ package rfc3164
import (
"sync"
- syslog "github.com/influxdata/go-syslog/v3"
+ syslog "github.com/leodido/go-syslog/v4"
)
// parser represent a RFC3164 parser with mutex capabilities.
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/syslog_message.go b/vendor/github.com/leodido/go-syslog/v4/rfc3164/syslog_message.go
similarity index 94%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc3164/syslog_message.go
rename to vendor/github.com/leodido/go-syslog/v4/rfc3164/syslog_message.go
index 0ac367440f..eea96eda10 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/syslog_message.go
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc3164/syslog_message.go
@@ -3,8 +3,8 @@ package rfc3164
import (
"time"
- "github.com/influxdata/go-syslog/v3"
- "github.com/influxdata/go-syslog/v3/common"
+ "github.com/leodido/go-syslog/v4"
+ "github.com/leodido/go-syslog/v4/common"
)
type syslogMessage struct {
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc3164/year.go b/vendor/github.com/leodido/go-syslog/v4/rfc3164/year.go
similarity index 100%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc3164/year.go
rename to vendor/github.com/leodido/go-syslog/v4/rfc3164/year.go
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/builder.go b/vendor/github.com/leodido/go-syslog/v4/rfc5424/builder.go
similarity index 99%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc5424/builder.go
rename to vendor/github.com/leodido/go-syslog/v4/rfc5424/builder.go
index 4aa3baaa99..f3bdbc8263 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/builder.go
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc5424/builder.go
@@ -5,11 +5,10 @@ import (
"sort"
"time"
- "github.com/influxdata/go-syslog/v3/common"
+ "github.com/leodido/go-syslog/v4/common"
)
// todo(leodido) > support best effort for builder ?
-
const builderStart int = 52
const builderEnTimestamp int = 1
@@ -72,7 +71,6 @@ func (sm *SyslogMessage) set(from entrypoint, value string) *SyslogMessage {
eof := len(data)
cs := from.translate()
backslashes := []int{}
-
{
if p == pe {
goto _testEof
@@ -9252,7 +9250,6 @@ func (sm *SyslogMessage) set(from entrypoint, value string) *SyslogMessage {
if s := string(data[pb:p]); s != "" {
sm.Message = &s
}
-
}
}
@@ -9380,7 +9377,7 @@ func (sm *SyslogMessage) String() (string, error) {
if sm.StructuredData != nil {
// Sort element identifiers
identifiers := make([]string, 0)
- for k := range *sm.StructuredData {
+ for k, _ := range *sm.StructuredData {
identifiers = append(identifiers, k)
}
sort.Strings(identifiers)
@@ -9392,7 +9389,7 @@ func (sm *SyslogMessage) String() (string, error) {
// Sort parameter names
params := (*sm.StructuredData)[id]
names := make([]string, 0)
- for n := range params {
+ for n, _ := range params {
names = append(names, n)
}
sort.Strings(names)
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/builder.go.rl b/vendor/github.com/leodido/go-syslog/v4/rfc5424/builder.go.rl
similarity index 99%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc5424/builder.go.rl
rename to vendor/github.com/leodido/go-syslog/v4/rfc5424/builder.go.rl
index 3eaccf224b..2a3d2750b4 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/builder.go.rl
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc5424/builder.go.rl
@@ -5,7 +5,7 @@ import (
"sort"
"fmt"
- "github.com/influxdata/go-syslog/v3/common"
+ "github.com/leodido/go-syslog/v4/common"
)
// todo(leodido) > support best effort for builder ?
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/machine.go b/vendor/github.com/leodido/go-syslog/v4/rfc5424/machine.go
similarity index 99%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc5424/machine.go
rename to vendor/github.com/leodido/go-syslog/v4/rfc5424/machine.go
index ddd4a7c6f2..f54e08835d 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/machine.go
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc5424/machine.go
@@ -4,8 +4,8 @@ import (
"fmt"
"time"
- "github.com/influxdata/go-syslog/v3"
- "github.com/influxdata/go-syslog/v3/common"
+ "github.com/leodido/go-syslog/v4"
+ "github.com/leodido/go-syslog/v4/common"
)
// ColumnPositionTemplate is the template used to communicate the column where errors occur.
@@ -48,7 +48,6 @@ const (
// RFC3339MICRO represents the timestamp format that RFC5424 mandates.
const RFC3339MICRO = "2006-01-02T15:04:05.999999Z07:00"
-
const start int = 1
const firstFinal int = 603
@@ -120,11 +119,9 @@ func (m *machine) Parse(input []byte) (syslog.Message, error) {
m.eof = len(input)
m.err = nil
output := &syslogMessage{}
-
{
m.cs = start
}
-
{
if (m.p) == (m.pe) {
goto _testEof
@@ -1729,7 +1726,6 @@ func (m *machine) Parse(input []byte) (syslog.Message, error) {
output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
output.prioritySet = true
-
if (m.data)[(m.p)] == 62 {
goto st4
}
@@ -1755,7 +1751,6 @@ func (m *machine) Parse(input []byte) (syslog.Message, error) {
stCase5:
output.version = uint16(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
-
if (m.data)[(m.p)] == 32 {
goto st6
}
@@ -9583,7 +9578,6 @@ func (m *machine) Parse(input []byte) (syslog.Message, error) {
stCase591:
output.version = uint16(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
-
if (m.data)[(m.p)] == 32 {
goto st6
}
@@ -9598,7 +9592,6 @@ func (m *machine) Parse(input []byte) (syslog.Message, error) {
stCase592:
output.version = uint16(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
-
if (m.data)[(m.p)] == 32 {
goto st6
}
@@ -9616,7 +9609,6 @@ func (m *machine) Parse(input []byte) (syslog.Message, error) {
output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
output.prioritySet = true
-
switch (m.data)[(m.p)] {
case 57:
goto st595
@@ -9640,7 +9632,6 @@ func (m *machine) Parse(input []byte) (syslog.Message, error) {
output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
output.prioritySet = true
-
if (m.data)[(m.p)] == 62 {
goto st4
}
@@ -9656,7 +9647,6 @@ func (m *machine) Parse(input []byte) (syslog.Message, error) {
output.priority = uint8(common.UnsafeUTF8DecimalCodePointsToInt(m.text()))
output.prioritySet = true
-
if (m.data)[(m.p)] == 62 {
goto st4
}
@@ -12025,7 +12015,6 @@ func (m *machine) Parse(input []byte) (syslog.Message, error) {
{
goto st614
}
-
}
}
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/machine.go.rl b/vendor/github.com/leodido/go-syslog/v4/rfc5424/machine.go.rl
similarity index 99%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc5424/machine.go.rl
rename to vendor/github.com/leodido/go-syslog/v4/rfc5424/machine.go.rl
index f0b9c821d4..6005496399 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/machine.go.rl
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc5424/machine.go.rl
@@ -4,8 +4,8 @@ import (
"time"
"fmt"
- "github.com/influxdata/go-syslog/v3"
- "github.com/influxdata/go-syslog/v3/common"
+ "github.com/leodido/go-syslog/v4"
+ "github.com/leodido/go-syslog/v4/common"
)
// ColumnPositionTemplate is the template used to communicate the column where errors occur.
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/options.go b/vendor/github.com/leodido/go-syslog/v4/rfc5424/options.go
similarity index 94%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc5424/options.go
rename to vendor/github.com/leodido/go-syslog/v4/rfc5424/options.go
index 05db00914a..ff3a009b50 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/options.go
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc5424/options.go
@@ -1,7 +1,7 @@
package rfc5424
import (
- syslog "github.com/influxdata/go-syslog/v3"
+ syslog "github.com/leodido/go-syslog/v4"
)
// WithBestEffort enables the best effort mode.
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/parser.go b/vendor/github.com/leodido/go-syslog/v4/rfc5424/parser.go
similarity index 95%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc5424/parser.go
rename to vendor/github.com/leodido/go-syslog/v4/rfc5424/parser.go
index 31417aa569..85e742f700 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/parser.go
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc5424/parser.go
@@ -3,7 +3,7 @@ package rfc5424
import (
"sync"
- syslog "github.com/influxdata/go-syslog/v3"
+ syslog "github.com/leodido/go-syslog/v4"
)
// parser represent a RFC5424 parser with mutex capabilities.
diff --git a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/syslog_message.go b/vendor/github.com/leodido/go-syslog/v4/rfc5424/syslog_message.go
similarity index 96%
rename from vendor/github.com/influxdata/go-syslog/v3/rfc5424/syslog_message.go
rename to vendor/github.com/leodido/go-syslog/v4/rfc5424/syslog_message.go
index 9f944a3f99..32211fe0f3 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/rfc5424/syslog_message.go
+++ b/vendor/github.com/leodido/go-syslog/v4/rfc5424/syslog_message.go
@@ -3,8 +3,8 @@ package rfc5424
import (
"time"
- "github.com/influxdata/go-syslog/v3"
- "github.com/influxdata/go-syslog/v3/common"
+ "github.com/leodido/go-syslog/v4"
+ "github.com/leodido/go-syslog/v4/common"
)
type syslogMessage struct {
diff --git a/vendor/github.com/influxdata/go-syslog/v3/syslog.go b/vendor/github.com/leodido/go-syslog/v4/syslog.go
similarity index 98%
rename from vendor/github.com/influxdata/go-syslog/v3/syslog.go
rename to vendor/github.com/leodido/go-syslog/v4/syslog.go
index e00079453a..a9deef3f3c 100644
--- a/vendor/github.com/influxdata/go-syslog/v3/syslog.go
+++ b/vendor/github.com/leodido/go-syslog/v4/syslog.go
@@ -6,7 +6,7 @@ import (
"io"
"time"
- "github.com/influxdata/go-syslog/v3/common"
+ "github.com/leodido/go-syslog/v4/common"
)
// BestEfforter is an interface that wraps the HasBestEffort method.
diff --git a/vendor/github.com/linode/linodego/.golangci.yml b/vendor/github.com/linode/linodego/.golangci.yml
index e37e28aedb..063292f353 100644
--- a/vendor/github.com/linode/linodego/.golangci.yml
+++ b/vendor/github.com/linode/linodego/.golangci.yml
@@ -7,22 +7,15 @@ linters-settings:
check-blank: true
govet:
- check-shadowing: true
-
enable:
- atomicalign
- enable-all: false
- disable:
- shadow
+ enable-all: false
disable-all: false
- golint:
- min-confidence: 0.8
gocyclo:
min-complexity: 30
gocognit:
min-complexity: 30
- maligned:
- suggest-new: true
dupl:
threshold: 100
diff --git a/vendor/github.com/linode/linodego/Makefile b/vendor/github.com/linode/linodego/Makefile
index 3597c2a7bb..460dd08024 100644
--- a/vendor/github.com/linode/linodego/Makefile
+++ b/vendor/github.com/linode/linodego/Makefile
@@ -25,9 +25,10 @@ citest: lint test
testunit:
go test -v $(PACKAGES) $(ARGS)
+ cd test && make testunit
testint:
- cd test && make test
+ cd test && make testint
testcov-func:
@go test -v -coverprofile="coverage.txt" . > /dev/null 2>&1
diff --git a/vendor/github.com/linode/linodego/account_availability.go b/vendor/github.com/linode/linodego/account_availability.go
index 9a846415c9..d0341083bc 100644
--- a/vendor/github.com/linode/linodego/account_availability.go
+++ b/vendor/github.com/linode/linodego/account_availability.go
@@ -8,13 +8,16 @@ import (
"github.com/go-resty/resty/v2"
)
-// AccountAvailability returns the resources information in a region which are NOT available to an account.
+// AccountAvailability returns the resources availability in a region to an account.
type AccountAvailability struct {
// region id
Region string `json:"region"`
// the unavailable resources in a region to the customer
Unavailable []string `json:"unavailable"`
+
+ // the available resources in a region to the customer
+ Available []string `json:"available"`
}
// AccountAvailabilityPagedResponse represents a paginated Account Availability API response
@@ -38,7 +41,7 @@ func (resp *AccountAvailabilityPagedResponse) castResult(r *resty.Request, e str
return castedRes.Pages, castedRes.Results, nil
}
-// ListAccountAvailabilities lists all available regions and the resources which are NOT available to the account.
+// ListAccountAvailabilities lists all regions and the resource availabilities to the account.
func (c *Client) ListAccountAvailabilities(ctx context.Context, opts *ListOptions) ([]AccountAvailability, error) {
response := AccountAvailabilityPagedResponse{}
err := c.listHelper(ctx, &response, opts)
@@ -48,7 +51,7 @@ func (c *Client) ListAccountAvailabilities(ctx context.Context, opts *ListOption
return response.Data, nil
}
-// GetAccountAvailability gets the unavailable resources in a region to the customer.
+// GetAccountAvailability gets the resources availability in a region to the customer.
func (c *Client) GetAccountAvailability(ctx context.Context, regionID string) (*AccountAvailability, error) {
req := c.R(ctx).SetResult(&AccountAvailability{})
regionID = url.PathEscape(regionID)
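For context (not part of the vendored change): a minimal sketch of how the reshaped availability API reads once this version is pulled in, with both the existing Unavailable field and the new Available field populated per region. The unauthenticated client construction and the region ID are illustrative assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/linode/linodego"
)

func main() {
	// Assumed setup; a real call needs an authenticated client.
	client := linodego.NewClient(nil)

	avail, err := client.GetAccountAvailability(context.Background(), "us-east")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("available:  ", avail.Available)   // new field added above
	fmt.Println("unavailable:", avail.Unavailable) // existing field
}
```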
diff --git a/vendor/github.com/linode/linodego/account_events.go b/vendor/github.com/linode/linodego/account_events.go
index 02d1ed65a5..0f8565d180 100644
--- a/vendor/github.com/linode/linodego/account_events.go
+++ b/vendor/github.com/linode/linodego/account_events.go
@@ -62,7 +62,7 @@ const (
ActionBackupsRestore EventAction = "backups_restore"
ActionCommunityQuestionReply EventAction = "community_question_reply"
ActionCommunityLike EventAction = "community_like"
- ActionCreateCardUpdated EventAction = "credit_card_updated"
+ ActionCreditCardUpdated EventAction = "credit_card_updated"
ActionDatabaseCreate EventAction = "database_create"
ActionDatabaseDegraded EventAction = "database_degraded"
ActionDatabaseDelete EventAction = "database_delete"
@@ -149,6 +149,9 @@ const (
ActionOAuthClientDelete EventAction = "oauth_client_delete"
ActionOAuthClientSecretReset EventAction = "oauth_client_secret_reset" //#nosec G101
ActionOAuthClientUpdate EventAction = "oauth_client_update"
+ ActionOBJAccessKeyCreate EventAction = "obj_access_key_create"
+ ActionOBJAccessKeyDelete EventAction = "obj_access_key_delete"
+ ActionOBJAccessKeyUpdate EventAction = "obj_access_key_update"
ActionPaymentMethodAdd EventAction = "payment_method_add"
ActionPaymentSubmitted EventAction = "payment_submitted"
ActionPasswordReset EventAction = "password_reset"
@@ -193,6 +196,10 @@ const (
// Deprecated: incorrect spelling,
// to be removed in the next major version release.
ActionVolumeDelte EventAction = "volume_delete"
+
+ // Deprecated: incorrect spelling,
+ // to be removed in the next major version
+ ActionCreateCardUpdated = ActionCreditCardUpdated
)
// EntityType constants start with Entity and include Linode API Event Entity Types
@@ -200,14 +207,31 @@ type EntityType string
// EntityType constants are the entities an Event can be related to.
const (
- EntityLinode EntityType = "linode"
- EntityDisk EntityType = "disk"
- EntityDatabase EntityType = "database"
- EntityDomain EntityType = "domain"
- EntityFirewall EntityType = "firewall"
- EntityNodebalancer EntityType = "nodebalancer"
- EntityVPC EntityType = "vpc"
- EntityVPCSubnet EntityType = "subnet"
+ EntityAccount EntityType = "account"
+ EntityBackups EntityType = "backups"
+ EntityCommunity EntityType = "community"
+ EntityDatabase EntityType = "database"
+ EntityDisk EntityType = "disk"
+ EntityDomain EntityType = "domain"
+ EntityTransfer EntityType = "entity_transfer"
+ EntityFirewall EntityType = "firewall"
+ EntityImage EntityType = "image"
+ EntityIPAddress EntityType = "ipaddress"
+ EntityLinode EntityType = "linode"
+ EntityLongview EntityType = "longview"
+ EntityManagedService EntityType = "managed_service"
+ EntityNodebalancer EntityType = "nodebalancer"
+ EntityOAuthClient EntityType = "oauth_client"
+ EntityProfile EntityType = "profile"
+ EntityStackscript EntityType = "stackscript"
+ EntityTag EntityType = "tag"
+ EntityTicket EntityType = "ticket"
+ EntityToken EntityType = "token"
+ EntityUser EntityType = "user"
+ EntityUserSSHKey EntityType = "user_ssh_key"
+ EntityVolume EntityType = "volume"
+ EntityVPC EntityType = "vpc"
+ EntityVPCSubnet EntityType = "subnet"
)
// EventStatus constants start with Event and include Linode API Event Status values
diff --git a/vendor/github.com/linode/linodego/client.go b/vendor/github.com/linode/linodego/client.go
index ddf4dd349b..55436298af 100644
--- a/vendor/github.com/linode/linodego/client.go
+++ b/vendor/github.com/linode/linodego/client.go
@@ -10,7 +10,9 @@ import (
"path"
"path/filepath"
"reflect"
+ "regexp"
"strconv"
+ "strings"
"sync"
"time"
@@ -138,6 +140,37 @@ func (c *Client) OnBeforeRequest(m func(request *Request) error) {
})
}
+// UseURL parses the individual components of the given API URL and configures the client
+// accordingly, deriving the base URL and the API version from it.
+// For example:
+//
+// client.UseURL("https://api.test.linode.com/v4beta")
+func (c *Client) UseURL(apiURL string) (*Client, error) {
+ parsedURL, err := url.Parse(apiURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse URL: %w", err)
+ }
+
+ // Create a new URL excluding the path to use as the base URL
+ baseURL := &url.URL{
+ Host: parsedURL.Host,
+ Scheme: parsedURL.Scheme,
+ }
+
+ c.SetBaseURL(baseURL.String())
+
+ versionMatches := regexp.MustCompile(`/v[a-zA-Z0-9]+`).FindAllString(parsedURL.Path, -1)
+
+ // Only set the version if a version is found in the URL, else use the default
+ if len(versionMatches) > 0 {
+ c.SetAPIVersion(
+ strings.Trim(versionMatches[len(versionMatches)-1], "/"),
+ )
+ }
+
+ return c, nil
+}
+
// SetBaseURL sets the base URL of the Linode v4 API (https://api.linode.com/v4)
func (c *Client) SetBaseURL(baseURL string) *Client {
baseURLPath, _ := url.Parse(baseURL)
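A small usage sketch (not part of the patch) for the new UseURL helper: it takes the scheme and host as the base URL and, when the path carries a version segment, sets the API version as well. The client construction and the test hostname are assumptions for illustration.

```go
package main

import (
	"log"

	"github.com/linode/linodego"
)

func main() {
	client := linodego.NewClient(nil)

	// Sets the base URL to https://api.test.linode.com and the API
	// version to "v4beta", matching the version regexp in UseURL above.
	if _, err := client.UseURL("https://api.test.linode.com/v4beta"); err != nil {
		log.Fatal(err)
	}
}
```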
diff --git a/vendor/github.com/linode/linodego/errors.go b/vendor/github.com/linode/linodego/errors.go
index fc3eb65472..ab9613be91 100644
--- a/vendor/github.com/linode/linodego/errors.go
+++ b/vendor/github.com/linode/linodego/errors.go
@@ -149,3 +149,34 @@ func NewError(err any) *Error {
return &Error{Code: ErrorUnsupported, Message: fmt.Sprintf("Unsupported type to linodego.NewError: %s", reflect.TypeOf(e))}
}
}
+
+// IsNotFound indicates if err indicates a 404 Not Found error from the Linode API.
+func IsNotFound(err error) bool {
+ return ErrHasStatus(err, http.StatusNotFound)
+}
+
+// ErrHasStatus checks if err is an error from the Linode API, and whether it contains the given HTTP status code.
+// More than one status code may be given.
+// If err is nil, err is not an [Error], or len(code) == 0, ErrHasStatus returns false.
+func ErrHasStatus(err error, code ...int) bool {
+ if err == nil {
+ return false
+ }
+
+ // Short-circuit if the caller did not provide any status codes.
+ if len(code) == 0 {
+ return false
+ }
+
+ var e *Error
+ if !errors.As(err, &e) {
+ return false
+ }
+ ec := e.StatusCode()
+ for _, c := range code {
+ if ec == c {
+ return true
+ }
+ }
+ return false
+}
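A sketch of how the new error helpers are intended to be used to branch on API status codes; GetInstance is an existing linodego call, and the instance ID plus the unauthenticated client are illustrative assumptions rather than part of this change.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/linode/linodego"
)

func main() {
	client := linodego.NewClient(nil) // assumed setup

	inst, err := client.GetInstance(context.Background(), 123)
	switch {
	case err == nil:
		fmt.Println("found:", inst.Label)
	case linodego.IsNotFound(err):
		fmt.Println("instance 123 does not exist")
	case linodego.ErrHasStatus(err, http.StatusTooManyRequests, http.StatusServiceUnavailable):
		fmt.Println("transient API error, retry later")
	default:
		log.Fatal(err)
	}
}
```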
diff --git a/vendor/github.com/linode/linodego/go.work b/vendor/github.com/linode/linodego/go.work
index db3a7dbc0e..193259886e 100644
--- a/vendor/github.com/linode/linodego/go.work
+++ b/vendor/github.com/linode/linodego/go.work
@@ -1,4 +1,4 @@
-go 1.20
+go 1.21
use (
.
diff --git a/vendor/github.com/linode/linodego/go.work.sum b/vendor/github.com/linode/linodego/go.work.sum
index 2eb88716f1..d7582cb503 100644
--- a/vendor/github.com/linode/linodego/go.work.sum
+++ b/vendor/github.com/linode/linodego/go.work.sum
@@ -1,4 +1,6 @@
cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
+cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA=
+cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw=
cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=
cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg=
cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
@@ -54,6 +56,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad h1:E
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
@@ -72,7 +75,6 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -88,12 +90,17 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg
github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
+github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
+github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
@@ -112,14 +119,12 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/linode/linodego v0.20.1 h1:Kw5Qes0E0wlKVx5EbITI+F/ambO6G+PQyK0Yi7i4EyQ=
github.com/linode/linodego v0.20.1/go.mod h1:XOWXRHjqeU2uPS84tKLgfWIfTlv3TYzCS0io4GOQzEI=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
github.com/maxatome/go-testdeep v1.11.0/go.mod h1:011SgQ6efzZYAen6fDn4BqQ+lUR72ysdyKe7Dyogw70=
-github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
@@ -135,12 +140,10 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
-github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
-github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
@@ -150,7 +153,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
@@ -158,12 +160,16 @@ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzu
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs=
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
@@ -176,9 +182,8 @@ golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
@@ -204,5 +209,16 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=
+google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao=
+google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
+google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
+google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
diff --git a/vendor/github.com/linode/linodego/images.go b/vendor/github.com/linode/linodego/images.go
index 539420c50e..6ff8a3a891 100644
--- a/vendor/github.com/linode/linodego/images.go
+++ b/vendor/github.com/linode/linodego/images.go
@@ -33,8 +33,10 @@ type Image struct {
Size int `json:"size"`
IsPublic bool `json:"is_public"`
Deprecated bool `json:"deprecated"`
+ Updated *time.Time `json:"-"`
Created *time.Time `json:"-"`
Expiry *time.Time `json:"-"`
+ EOL *time.Time `json:"-"`
}
// ImageCreateOptions fields are those accepted by CreateImage
@@ -80,8 +82,10 @@ func (i *Image) UnmarshalJSON(b []byte) error {
p := struct {
*Mask
+ Updated *parseabletime.ParseableTime `json:"updated"`
Created *parseabletime.ParseableTime `json:"created"`
Expiry *parseabletime.ParseableTime `json:"expiry"`
+ EOL *parseabletime.ParseableTime `json:"eol"`
}{
Mask: (*Mask)(i),
}
@@ -90,8 +94,10 @@ func (i *Image) UnmarshalJSON(b []byte) error {
return err
}
+ i.Updated = (*time.Time)(p.Updated)
i.Created = (*time.Time)(p.Created)
i.Expiry = (*time.Time)(p.Expiry)
+ i.EOL = (*time.Time)(p.EOL)
return nil
}
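A short sketch (not part of the patch) of what the extended UnmarshalJSON now yields: Updated and EOL are decoded through parseabletime alongside the existing Created and Expiry fields. The JSON payload is hand-written for illustration and assumes the API's usual `2006-01-02T15:04:05` timestamp layout.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"

	"github.com/linode/linodego"
)

func main() {
	data := []byte(`{"id":"linode/debian11","label":"Debian 11","eol":"2026-06-30T04:00:00"}`)

	var img linodego.Image
	if err := json.Unmarshal(data, &img); err != nil {
		log.Fatal(err)
	}
	if img.EOL != nil {
		fmt.Println(img.Label, "reaches EOL at", img.EOL.Format(time.RFC3339))
	}
}
```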
diff --git a/vendor/github.com/linode/linodego/instance_ips.go b/vendor/github.com/linode/linodego/instance_ips.go
index beaeb0a214..bb3eb2364f 100644
--- a/vendor/github.com/linode/linodego/instance_ips.go
+++ b/vendor/github.com/linode/linodego/instance_ips.go
@@ -19,6 +19,7 @@ type InstanceIPv4Response struct {
Private []*InstanceIP `json:"private"`
Shared []*InstanceIP `json:"shared"`
Reserved []*InstanceIP `json:"reserved"`
+ VPC []*VPCIP `json:"vpc"`
}
// InstanceIP represents an Instance IP with additional DNS and networking details
@@ -35,6 +36,23 @@ type InstanceIP struct {
VPCNAT1To1 *InstanceIPNAT1To1 `json:"vpc_nat_1_1"`
}
+// VPCIP represents a private IP address in a VPC subnet with additional networking details
+type VPCIP struct {
+ Address *string `json:"address"`
+ AddressRange *string `json:"address_range"`
+ Gateway string `json:"gateway"`
+ SubnetMask string `json:"subnet_mask"`
+ Prefix int `json:"prefix"`
+ LinodeID int `json:"linode_id"`
+ Region string `json:"region"`
+ Active bool `json:"active"`
+ NAT1To1 *string `json:"nat_1_1"`
+ VPCID int `json:"vpc_id"`
+ SubnetID int `json:"subnet_id"`
+ ConfigID int `json:"config_id"`
+ InterfaceID int `json:"interface_id"`
+}
+
// InstanceIPv6Response contains the IPv6 addresses and ranges for an Instance
type InstanceIPv6Response struct {
LinkLocal *InstanceIP `json:"link_local"`
diff --git a/vendor/github.com/linode/linodego/regions.go b/vendor/github.com/linode/linodego/regions.go
index f4210383e2..3b0caf8a50 100644
--- a/vendor/github.com/linode/linodego/regions.go
+++ b/vendor/github.com/linode/linodego/regions.go
@@ -21,6 +21,7 @@ type Region struct {
Status string `json:"status"`
Resolvers RegionResolvers `json:"resolvers"`
Label string `json:"label"`
+ SiteType string `json:"site_type"`
}
// RegionResolvers contains the DNS resolvers of a region
diff --git a/vendor/github.com/linode/linodego/vpc_ips.go b/vendor/github.com/linode/linodego/vpc_ips.go
new file mode 100644
index 0000000000..7ccef887e9
--- /dev/null
+++ b/vendor/github.com/linode/linodego/vpc_ips.go
@@ -0,0 +1,20 @@
+package linodego
+
+import (
+ "context"
+ "fmt"
+)
+
+// ListAllVPCIPAddresses gets the list of all IP addresses of all VPCs in the Linode account.
+func (c *Client) ListAllVPCIPAddresses(
+ ctx context.Context, opts *ListOptions,
+) ([]VPCIP, error) {
+ return getPaginatedResults[VPCIP](ctx, c, "vpcs/ips", opts)
+}
+
+// ListVPCIPAddresses gets the list of all IP addresses of a specific VPC.
+func (c *Client) ListVPCIPAddresses(
+ ctx context.Context, vpcID int, opts *ListOptions,
+) ([]VPCIP, error) {
+ return getPaginatedResults[VPCIP](ctx, c, fmt.Sprintf("vpcs/%d/ips", vpcID), opts)
+}
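A minimal sketch of the two new list helpers added in this file; the field names come from the VPCIP struct introduced in instance_ips.go above, while the client setup and the nil ListOptions are assumptions for illustration.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/linode/linodego"
)

func main() {
	client := linodego.NewClient(nil) // assumed setup

	ips, err := client.ListAllVPCIPAddresses(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, ip := range ips {
		if ip.Address != nil {
			fmt.Printf("linode %d: %s (vpc %d, subnet %d)\n",
				ip.LinodeID, *ip.Address, ip.VPCID, ip.SubnetID)
		}
	}
}
```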
diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore
new file mode 100644
index 0000000000..e7081ff522
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/.gitignore
@@ -0,0 +1,6 @@
+*.sublime-project
+*.sublime-workspace
+*.un~
+*.swp
+.idea/
+*.iml
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
new file mode 100644
index 0000000000..842e8e24fb
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -0,0 +1,205 @@
+## Changelog
+
+### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022
+
+ * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge
+
+ Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch.
+
+ * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions
+
+### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022
+
+ * [PR #57](https://github.com/magiconair/properties/pull/57): Fix "unreachable code" lint error
+
+ Thanks to [@ellie](https://github.com/ellie) for the patch.
+
+ * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible
+
+ This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the
+ author happy until it affects real users.
+
+ Thanks to [@maage](https://github.com/maage) for the patch.
+
+### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021
+
+ * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments
+
+ When reading comments \ are loaded correctly, but when writing they are then
+ replaced by \\. This leads to wrong comments when writing and reading multiple times.
+
+ Thanks to [@doxsch](https://github.com/doxsch) for the patch.
+
+### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020
+
+ * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references
+
+ Thanks to [@sriv](https://github.com/sriv) for the patch.
+
+### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020
+
+ * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference
+
+ The change is to include the key that is causing the circular reference in the error
+ message when parsing/loading the properties files.
+
+ Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch.
+
+### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020
+
+ * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write
+
+ This patch ensures that backslashes are escaped on write. Existing applications which
+ rely on the old behavior may need to be updated.
+
+ Thanks to [@apesternikov](https://github.com/apesternikov) for the patch.
+
+ * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL()
+
+ Thanks to [@aliras1](https://github.com/aliras1) for the patch.
+
+ * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write()
+
+ Thanks to [@mkjor](https://github.com/mkjor) for the patch.
+
+ * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys
+
+ Thanks to [@mkjor](https://github.com/mkjor) for the patch.
+
+### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019
+
+ * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request
+
+ This patch ensures that in `LoadURL` the response body is always closed.
+
+ Thanks to [@liubog2008](https://github.com/liubog2008) for the patch.
+
+### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018
+
+ * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading
+
+ This adds the option to disable property expansion during loading.
+
+ Thanks to [@kmala](https://github.com/kmala) for the patch.
+
+### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018
+
+ * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases.
+
+ See PR for an example.
+
+ Thanks to [@yobert](https://github.com/yobert) for the fix.
+
+### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018
+
+ * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value
+
+ Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail
+ with a `circular reference error`.
+
+ Thanks to [@yobert](https://github.com/yobert) for the fix.
+
+### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017
+
+ * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces
+
+ * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled
+
+ Thanks to [@mgurov](https://github.com/mgurov) for the fix.
+
+### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017
+
+ * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
+ * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map
+
+### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017
+
+ * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency
+ * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc)
+
+### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017
+
+ * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER`
+ * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs
+ * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy
+ * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function
+
+### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016
+
+ * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL.
+ * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string.
+ * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe)
+
+### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015
+
+ * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags.
+
+### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015
+
+ * Vendored in gopkg.in/check.v1
+
+### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015
+
+ * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs)
+
+### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015
+
+ * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references.
+
+### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015
+
+ * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
+
+### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015
+
+ * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
+ * Add clickable links to README
+
+### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014
+
+ * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with
+ [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
+
+### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014
+
+ * Added support for single and multi-line comments (reading, writing and updating)
+ * The order of keys is now preserved
+ * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
+ * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
+ * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
+
+### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014
+
+ * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
+
+### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014
+
+ * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
+
+### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014
+
+ * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
+ * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
+
+### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014
+
+* Added support for time.Duration
+* Made MustXXX() failure behavior configurable (log.Fatal, panic) - custom
+* Changed default of MustXXX() failure from panic to log.Fatal
+
+### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014
+
+* Added MustGet... functions
+* Added support for int and uint with range checks on 32 bit platforms
+
+### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014
+
+* Renamed from goproperties to properties
+* Added support for expansion of environment vars in
+ filenames and value expressions
+* Fixed bug where value expressions were not at the
+ start of the string
+
+### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014
+
+* Initial release
diff --git a/vendor/github.com/magiconair/properties/LICENSE.md b/vendor/github.com/magiconair/properties/LICENSE.md
new file mode 100644
index 0000000000..79c87e3e6f
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/LICENSE.md
@@ -0,0 +1,24 @@
+Copyright (c) 2013-2020, Frank Schroeder
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md
new file mode 100644
index 0000000000..e2edda025b
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/README.md
@@ -0,0 +1,128 @@
+[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases)
+[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties)
+[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE)
+[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties)
+
+# Overview
+
+#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why.
+
+properties is a Go library for reading and writing properties files.
+
+It supports reading from multiple files or URLs and Spring style recursive
+property expansion of expressions like `${key}` to their corresponding value.
+Value expressions can refer to other keys like in `${key}` or to environment
+variables like in `${USER}`. Filenames can also contain environment variables
+like in `/home/${USER}/myapp.properties`.
+
+Properties can be decoded into structs, maps, arrays and values through
+struct tags.
+
+Comments and the order of keys are preserved. Comments can be modified
+and can be written to the output.
+
+The properties library supports both ISO-8859-1 and UTF-8 encoded data.
+
+Starting from version 1.3.0 the behavior of the MustXXX() functions is
+configurable by providing a custom `ErrorHandler` function. The default has
+changed from `panic` to `log.Fatal` but this is configurable and custom
+error handling functions can be provided. See the package documentation for
+details.
+
+Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties)
+
+## Getting Started
+
+```go
+import (
+ "flag"
+ "github.com/magiconair/properties"
+)
+
+func main() {
+ // init from a file
+ p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
+
+ // or multiple files
+ p = properties.MustLoadFiles([]string{
+ "${HOME}/config.properties",
+ "${HOME}/config-${USER}.properties",
+ }, properties.UTF8, true)
+
+ // or from a map
+ p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"})
+
+ // or from a string
+ p = properties.MustLoadString("key=value\nabc=def")
+
+ // or from a URL
+ p = properties.MustLoadURL("http://host/path")
+
+ // or from multiple URLs
+ p = properties.MustLoadURLs([]string{
+ "http://host/config",
+ "http://host/config-${USER}",
+ }, true)
+
+ // or from flags
+ p.MustFlag(flag.CommandLine)
+
+ // get values through getters
+ host := p.MustGetString("host")
+ port := p.GetInt("port", 8080)
+
+ // or through Decode
+ type Config struct {
+ Host string `properties:"host"`
+ Port int `properties:"port,default=9000"`
+ Accept []string `properties:"accept,default=image/png;image;gif"`
+ Timeout time.Duration `properties:"timeout,default=5s"`
+ }
+ var cfg Config
+ if err := p.Decode(&cfg); err != nil {
+ log.Fatal(err)
+ }
+}
+
+```
+
+## Installation and Upgrade
+
+```
+$ go get -u github.com/magiconair/properties
+```
+
+## License
+
+2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details.
+
+## ToDo
+
+* Dump contents with passwords and secrets obscured
+
+## Updated Git tags
+
+#### 13 Feb 2018
+
+I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags
+and I've only recently learned that this doesn't play well with `git describe` 😞
+
+I have replaced all lightweight tags with signed tags using this script which should
+retain the commit date, name and email address. Please run `git pull --tags` to update them.
+
+Worst case you have to reclone the repo.
+
+```shell
+#!/bin/bash
+tag=$1
+echo "Updating $tag"
+date=$(git show ${tag}^0 --format=%aD | head -1)
+email=$(git show ${tag}^0 --format=%aE | head -1)
+name=$(git show ${tag}^0 --format=%aN | head -1)
+GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag}
+```
+
+I apologize for the inconvenience.
+
+Frank
+
diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go
new file mode 100644
index 0000000000..8e6aa441d9
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/decode.go
@@ -0,0 +1,289 @@
+// Copyright 2013-2022 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Decode assigns property values to exported fields of a struct.
+//
+// Decode traverses v recursively and returns an error if a value cannot be
+// converted to the field type or a required value is missing for a field.
+//
+// The following type dependent decodings are used:
+//
+// String, boolean, numeric fields have the value of the property key assigned.
+// The property key name is the name of the field. A different key and a default
+// value can be set in the field's tag. Fields without default value are
+// required. If the value cannot be converted to the field type an error is
+// returned.
+//
+// time.Duration fields have the result of time.ParseDuration() assigned.
+//
+// time.Time fields have the value of time.Parse() assigned. The default layout
+// is time.RFC3339 but can be set in the field's tag.
+//
+// Arrays and slices of string, boolean, numeric, time.Duration and time.Time
+// fields have the value interpreted as a comma separated list of values. The
+// individual values are trimmed of whitespace and empty values are ignored. A
+// default value can be provided as a semicolon separated list in the field's
+// tag.
+//
+// Struct fields are decoded recursively using the field name plus "." as
+// prefix. The prefix (without dot) can be overridden in the field's tag.
+// Default values are not supported in the field's tag. Specify them on the
+// fields of the inner struct instead.
+//
+// Map fields must have a key of type string and are decoded recursively by
+// using the field's name plus "." as prefix and the next element of the key
+// name as map key. The prefix (without dot) can be overridden in the field's
+// tag. Default values are not supported.
+//
+// Examples:
+//
+// // Field is ignored.
+// Field int `properties:"-"`
+//
+// // Field is assigned value of 'Field'.
+// Field int
+//
+// // Field is assigned value of 'myName'.
+// Field int `properties:"myName"`
+//
+// // Field is assigned value of key 'myName' and has a default
+// // value 15 if the key does not exist.
+// Field int `properties:"myName,default=15"`
+//
+// // Field is assigned value of key 'Field' and has a default
+// // value 15 if the key does not exist.
+// Field int `properties:",default=15"`
+//
+// // Field is assigned value of key 'date' and the date
+// // is in format 2006-01-02
+// Field time.Time `properties:"date,layout=2006-01-02"`
+//
+// // Field is assigned the non-empty and whitespace trimmed
+// // values of key 'Field' split by commas.
+// Field []string
+//
+// // Field is assigned the non-empty and whitespace trimmed
+// // values of key 'Field' split by commas and has a default
+// // value ["a", "b", "c"] if the key does not exist.
+// Field []string `properties:",default=a;b;c"`
+//
+// // Field is decoded recursively with "Field." as key prefix.
+// Field SomeStruct
+//
+// // Field is decoded recursively with "myName." as key prefix.
+// Field SomeStruct `properties:"myName"`
+//
+// // Field is decoded recursively with "Field." as key prefix
+// // and the next dotted element of the key as map key.
+// Field map[string]string
+//
+// // Field is decoded recursively with "myName." as key prefix
+// // and the next dotted element of the key as map key.
+// Field map[string]string `properties:"myName"`
+func (p *Properties) Decode(x interface{}) error {
+ t, v := reflect.TypeOf(x), reflect.ValueOf(x)
+ if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct {
+ return fmt.Errorf("not a pointer to struct: %s", t)
+ }
+ if err := dec(p, "", nil, nil, v); err != nil {
+ return err
+ }
+ return nil
+}
+
+func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error {
+ t := v.Type()
+
+ // value returns the property value for key or the default if provided.
+ value := func() (string, error) {
+ if val, ok := p.Get(key); ok {
+ return val, nil
+ }
+ if def != nil {
+ return *def, nil
+ }
+ return "", fmt.Errorf("missing required key %s", key)
+ }
+
+ // conv converts a string to a value of the given type.
+ conv := func(s string, t reflect.Type) (val reflect.Value, err error) {
+ var v interface{}
+
+ switch {
+ case isDuration(t):
+ v, err = time.ParseDuration(s)
+
+ case isTime(t):
+ layout := opts["layout"]
+ if layout == "" {
+ layout = time.RFC3339
+ }
+ v, err = time.Parse(layout, s)
+
+ case isBool(t):
+ v, err = boolVal(s), nil
+
+ case isString(t):
+ v, err = s, nil
+
+ case isFloat(t):
+ v, err = strconv.ParseFloat(s, 64)
+
+ case isInt(t):
+ v, err = strconv.ParseInt(s, 10, 64)
+
+ case isUint(t):
+ v, err = strconv.ParseUint(s, 10, 64)
+
+ default:
+ return reflect.Zero(t), fmt.Errorf("unsupported type %s", t)
+ }
+ if err != nil {
+ return reflect.Zero(t), err
+ }
+ return reflect.ValueOf(v).Convert(t), nil
+ }
+
+ // keydef returns the property key and the default value based on the
+ // name of the struct field and the options in the tag.
+ keydef := func(f reflect.StructField) (string, *string, map[string]string) {
+ _key, _opts := parseTag(f.Tag.Get("properties"))
+
+ var _def *string
+ if d, ok := _opts["default"]; ok {
+ _def = &d
+ }
+ if _key != "" {
+ return _key, _def, _opts
+ }
+ return f.Name, _def, _opts
+ }
+
+ switch {
+ case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t):
+ s, err := value()
+ if err != nil {
+ return err
+ }
+ val, err := conv(s, t)
+ if err != nil {
+ return err
+ }
+ v.Set(val)
+
+ case isPtr(t):
+ return dec(p, key, def, opts, v.Elem())
+
+ case isStruct(t):
+ for i := 0; i < v.NumField(); i++ {
+ fv := v.Field(i)
+ fk, def, opts := keydef(t.Field(i))
+ if !fv.CanSet() {
+ return fmt.Errorf("cannot set %s", t.Field(i).Name)
+ }
+ if fk == "-" {
+ continue
+ }
+ if key != "" {
+ fk = key + "." + fk
+ }
+ if err := dec(p, fk, def, opts, fv); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case isArray(t):
+ val, err := value()
+ if err != nil {
+ return err
+ }
+ vals := split(val, ";")
+ a := reflect.MakeSlice(t, 0, len(vals))
+ for _, s := range vals {
+ val, err := conv(s, t.Elem())
+ if err != nil {
+ return err
+ }
+ a = reflect.Append(a, val)
+ }
+ v.Set(a)
+
+ case isMap(t):
+ valT := t.Elem()
+ m := reflect.MakeMap(t)
+ for postfix := range p.FilterStripPrefix(key + ".").m {
+ pp := strings.SplitN(postfix, ".", 2)
+ mk, mv := pp[0], reflect.New(valT)
+ if err := dec(p, key+"."+mk, nil, nil, mv); err != nil {
+ return err
+ }
+ m.SetMapIndex(reflect.ValueOf(mk), mv.Elem())
+ }
+ v.Set(m)
+
+ default:
+ return fmt.Errorf("unsupported type %s", t)
+ }
+ return nil
+}
+
+// split splits a string on sep, trims whitespace of elements
+// and omits empty elements
+func split(s string, sep string) []string {
+ var a []string
+ for _, v := range strings.Split(s, sep) {
+ if v = strings.TrimSpace(v); v != "" {
+ a = append(a, v)
+ }
+ }
+ return a
+}
+
+// parseTag parses a "key,k=v,k=v,..."
+func parseTag(tag string) (key string, opts map[string]string) {
+ opts = map[string]string{}
+ for i, s := range strings.Split(tag, ",") {
+ if i == 0 {
+ key = s
+ continue
+ }
+
+ pp := strings.SplitN(s, "=", 2)
+ if len(pp) == 1 {
+ opts[pp[0]] = ""
+ } else {
+ opts[pp[0]] = pp[1]
+ }
+ }
+ return key, opts
+}
+
+func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice }
+func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool }
+func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
+func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map }
+func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr }
+func isString(t reflect.Type) bool { return t.Kind() == reflect.String }
+func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct }
+func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) }
+func isFloat(t reflect.Type) bool {
+ return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64
+}
+func isInt(t reflect.Type) bool {
+ return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64
+}
+func isUint(t reflect.Type) bool {
+ return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64
+}
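To make the tag syntax documented on Decode above concrete, a minimal sketch decoding a small property set into a struct; the keys, defaults, and MustLoadString input are illustrative, not taken from this repository.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/magiconair/properties"
)

type config struct {
	Host    string        `properties:"host"`
	Port    int           `properties:"port,default=8080"`
	Tags    []string      `properties:"tags,default=a;b;c"`
	Timeout time.Duration `properties:"timeout,default=5s"`
}

func main() {
	p := properties.MustLoadString("host=localhost\nport=9090\ntags=x, y, z")

	var cfg config
	if err := p.Decode(&cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Host:localhost Port:9090 Tags:[x y z] Timeout:5s}
}
```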
diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go
new file mode 100644
index 0000000000..7c79793159
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/doc.go
@@ -0,0 +1,155 @@
+// Copyright 2013-2022 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package properties provides functions for reading and writing
+// ISO-8859-1 and UTF-8 encoded .properties files and has
+// support for recursive property expansion.
+//
+// Java properties files are ISO-8859-1 encoded and use Unicode
+// literals for characters outside the ISO character set. Unicode
+// literals can be used in UTF-8 encoded properties files but
+// aren't necessary.
+//
+// To load a single properties file use MustLoadFile():
+//
+// p := properties.MustLoadFile(filename, properties.UTF8)
+//
+// To load multiple properties files use MustLoadFiles()
+// which loads the files in the given order and merges the
+// result. Missing properties files can be ignored if the
+// 'ignoreMissing' flag is set to true.
+//
+// Filenames can contain environment variables which are expanded
+// before loading.
+//
+// f1 := "/etc/myapp/myapp.conf"
+// f2 := "/home/${USER}/myapp.conf"
+// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true)
+//
+// All of the different key/value delimiters ' ', ':' and '=' are
+// supported as well as the comment characters '!' and '#' and
+// multi-line values.
+//
+// ! this is a comment
+// # and so is this
+//
+// # the following expressions are equal
+// key value
+// key=value
+// key:value
+// key = value
+// key : value
+// key = val\
+// ue
+//
+// Properties stores all comments preceding a key and provides
+// GetComments() and SetComments() methods to retrieve and
+// update them. The convenience functions GetComment() and
+// SetComment() allow access to the last comment. The
+// WriteComment() method writes properties files including
+// the comments and with the keys in the original order.
+// This can be used for sanitizing properties files.
+//
+// Property expansion is recursive and circular references
+// and malformed expressions are not allowed and cause an
+// error. Expansion of environment variables is supported.
+//
+// # standard property
+// key = value
+//
+// # property expansion: key2 = value
+// key2 = ${key}
+//
+// # recursive expansion: key3 = value
+// key3 = ${key2}
+//
+// # circular reference (error)
+// key = ${key}
+//
+// # malformed expression (error)
+// key = ${ke
+//
+// # refers to the users' home dir
+// home = ${HOME}
+//
+// # local key takes precedence over env var: u = foo
+// USER = foo
+// u = ${USER}
+//
+// The default property expansion format is ${key} but can be
+// changed by setting different pre- and postfix values on the
+// Properties object.
+//
+// p := properties.NewProperties()
+// p.Prefix = "#["
+// p.Postfix = "]#"
+//
+// Properties provides convenience functions for getting typed
+// values with default values if the key does not exist or the
+// type conversion failed.
+//
+// # Returns true if the value is either "1", "on", "yes" or "true"
+// # Returns false for every other value and the default value if
+// # the key does not exist.
+// v = p.GetBool("key", false)
+//
+// # Returns the value if the key exists and the format conversion
+// # was successful. Otherwise, the default value is returned.
+// v = p.GetInt64("key", 999)
+// v = p.GetUint64("key", 999)
+// v = p.GetFloat64("key", 123.0)
+// v = p.GetString("key", "def")
+// v = p.GetDuration("key", 999)
+//
+// As an alternative properties may be applied with the standard
+// library's flag implementation at any time.
+//
+// # Standard configuration
+// v = flag.Int("key", 999, "help message")
+// flag.Parse()
+//
+// # Merge p into the flag set
+// p.MustFlag(flag.CommandLine)
+//
+// Properties provides several MustXXX() convenience functions
+// which will terminate the app if an error occurs. The behavior
+// of the failure is configurable and the default is to call
+// log.Fatal(err). To have the MustXXX() functions panic instead
+// of logging the error set a different ErrorHandler before
+// you use the Properties package.
+//
+// properties.ErrorHandler = properties.PanicHandler
+//
+// # Will panic instead of logging an error
+// p := properties.MustLoadFile("config.properties")
+//
+// You can also provide your own ErrorHandler function. The only requirement
+// is that the error handler function must exit after handling the error.
+//
+// properties.ErrorHandler = func(err error) {
+// fmt.Println(err)
+// os.Exit(1)
+// }
+//
+// # Will write to stdout and then exit
+// p := properties.MustLoadFile("config.properties")
+//
+// Properties can also be loaded into a struct via the `Decode`
+// method, e.g.
+//
+// type S struct {
+// A string `properties:"a,default=foo"`
+// D time.Duration `properties:"timeout,default=5s"`
+// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"`
+// }
+//
+// See `Decode()` method for the full documentation.
+//
+// The following documents provide a description of the properties
+// file format.
+//
+// http://en.wikipedia.org/wiki/.properties
+//
+// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29
+package properties
diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go
new file mode 100644
index 0000000000..35d0ae97b3
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/integrate.go
@@ -0,0 +1,35 @@
+// Copyright 2013-2022 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import "flag"
+
+// MustFlag sets flags that are skipped by dst.Parse when p contains
+// the respective key for flag.Flag.Name.
+//
+// Its use is recommended with command line arguments as in:
+//
+// flag.Parse()
+// p.MustFlag(flag.CommandLine)
+func (p *Properties) MustFlag(dst *flag.FlagSet) {
+ m := make(map[string]*flag.Flag)
+ dst.VisitAll(func(f *flag.Flag) {
+ m[f.Name] = f
+ })
+ dst.Visit(func(f *flag.Flag) {
+ delete(m, f.Name) // overridden
+ })
+
+ for name, f := range m {
+ v, ok := p.Get(name)
+ if !ok {
+ continue
+ }
+
+ if err := f.Value.Set(v); err != nil {
+ ErrorHandler(err)
+ }
+ }
+}
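
A brief, hypothetical sketch of the flag integration implemented above: flags that the user did not pass on the command line are filled in from the Properties object (the flag and key names are illustrative):

    package main

    import (
        "flag"
        "fmt"

        "github.com/magiconair/properties"
    )

    func main() {
        // Hypothetical flag and property names.
        port := flag.Int("port", 8080, "listen port")
        flag.Parse()

        p := properties.MustLoadString("port = 9090")

        // Only flags that were not set on the command line are taken from p.
        p.MustFlag(flag.CommandLine)

        fmt.Println(*port) // 9090, unless -port was given explicitly
    }
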
diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go
new file mode 100644
index 0000000000..3d15a1f6ed
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/lex.go
@@ -0,0 +1,395 @@
+// Copyright 2013-2022 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Parts of the lexer are from the template/text/parser package
+// For these parts the following applies:
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file of the go 1.2
+// distribution.
+
+package properties
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+ typ itemType // The type of this item.
+ pos int // The starting position, in bytes, of this item in the input string.
+ val string // The value of this item.
+}
+
+func (i item) String() string {
+ switch {
+ case i.typ == itemEOF:
+ return "EOF"
+ case i.typ == itemError:
+ return i.val
+ case len(i.val) > 10:
+ return fmt.Sprintf("%.10q...", i.val)
+ }
+ return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+ itemError itemType = iota // error occurred; value is text of error
+ itemEOF
+ itemKey // a key
+ itemValue // a value
+ itemComment // a comment
+)
+
+// defines a constant for EOF
+const eof = -1
+
+// permitted whitespace characters: space, FF and TAB
+const whitespace = " \f\t"
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+ input string // the string being scanned
+ state stateFn // the next lexing function to enter
+ pos int // current position in the input
+ start int // start position of this item
+ width int // width of last rune read from input
+ lastPos int // position of most recent item returned by nextItem
+ runes []rune // scanned runes for this item
+ items chan item // channel of scanned items
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.width = w
+ l.pos += l.width
+ return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+ l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+ i := item{t, l.start, string(l.runes)}
+ l.items <- i
+ l.start = l.pos
+ l.runes = l.runes[:0]
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+ l.start = l.pos
+}
+
+// appends the rune to the current value
+func (l *lexer) appendRune(r rune) {
+ l.runes = append(l.runes, r)
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+ if strings.ContainsRune(valid, l.next()) {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+ for strings.ContainsRune(valid, l.next()) {
+ }
+ l.backup()
+}
+
+// lineNumber reports which line we're on, based on the position of
+// the previous item returned by nextItem. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+ return 1 + strings.Count(l.input[:l.lastPos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+ l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
+ return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+ i := <-l.items
+ l.lastPos = i.pos
+ return i
+}
+
+// lex creates a new scanner for the input string.
+func lex(input string) *lexer {
+ l := &lexer{
+ input: input,
+ items: make(chan item),
+ runes: make([]rune, 0, 32),
+ }
+ go l.run()
+ return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+ for l.state = lexBeforeKey(l); l.state != nil; {
+ l.state = l.state(l)
+ }
+}
+
+// state functions
+
+// lexBeforeKey scans until a key begins.
+func lexBeforeKey(l *lexer) stateFn {
+ switch r := l.next(); {
+ case isEOF(r):
+ l.emit(itemEOF)
+ return nil
+
+ case isEOL(r):
+ l.ignore()
+ return lexBeforeKey
+
+ case isComment(r):
+ return lexComment
+
+ case isWhitespace(r):
+ l.ignore()
+ return lexBeforeKey
+
+ default:
+ l.backup()
+ return lexKey
+ }
+}
+
+// lexComment scans a comment line. The comment character has already been scanned.
+func lexComment(l *lexer) stateFn {
+ l.acceptRun(whitespace)
+ l.ignore()
+ for {
+ switch r := l.next(); {
+ case isEOF(r):
+ l.ignore()
+ l.emit(itemEOF)
+ return nil
+ case isEOL(r):
+ l.emit(itemComment)
+ return lexBeforeKey
+ default:
+ l.appendRune(r)
+ }
+ }
+}
+
+// lexKey scans the key up to a delimiter
+func lexKey(l *lexer) stateFn {
+ var r rune
+
+Loop:
+ for {
+ switch r = l.next(); {
+
+ case isEscape(r):
+ err := l.scanEscapeSequence()
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+
+ case isEndOfKey(r):
+ l.backup()
+ break Loop
+
+ case isEOF(r):
+ break Loop
+
+ default:
+ l.appendRune(r)
+ }
+ }
+
+ if len(l.runes) > 0 {
+ l.emit(itemKey)
+ }
+
+ if isEOF(r) {
+ l.emit(itemEOF)
+ return nil
+ }
+
+ return lexBeforeValue
+}
+
+// lexBeforeValue scans the delimiter between key and value.
+// Leading and trailing whitespace is ignored.
+// We expect to be just after the key.
+func lexBeforeValue(l *lexer) stateFn {
+ l.acceptRun(whitespace)
+ l.accept(":=")
+ l.acceptRun(whitespace)
+ l.ignore()
+ return lexValue
+}
+
+// lexValue scans text until the end of the line. We expect to be just after the delimiter.
+func lexValue(l *lexer) stateFn {
+ for {
+ switch r := l.next(); {
+ case isEscape(r):
+ if isEOL(l.peek()) {
+ l.next()
+ l.acceptRun(whitespace)
+ } else {
+ err := l.scanEscapeSequence()
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ }
+
+ case isEOL(r):
+ l.emit(itemValue)
+ l.ignore()
+ return lexBeforeKey
+
+ case isEOF(r):
+ l.emit(itemValue)
+ l.emit(itemEOF)
+ return nil
+
+ default:
+ l.appendRune(r)
+ }
+ }
+}
+
+// scanEscapeSequence scans either one of the escaped characters
+// or a unicode literal. We expect to be after the escape character.
+func (l *lexer) scanEscapeSequence() error {
+ switch r := l.next(); {
+
+ case isEscapedCharacter(r):
+ l.appendRune(decodeEscapedCharacter(r))
+ return nil
+
+ case atUnicodeLiteral(r):
+ return l.scanUnicodeLiteral()
+
+ case isEOF(r):
+ return fmt.Errorf("premature EOF")
+
+ // silently drop the escape character and append the rune as is
+ default:
+ l.appendRune(r)
+ return nil
+ }
+}
+
+// scans a unicode literal in the form \uXXXX. We expect to be after the \u.
+func (l *lexer) scanUnicodeLiteral() error {
+ // scan the digits
+ d := make([]rune, 4)
+ for i := 0; i < 4; i++ {
+ d[i] = l.next()
+ if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) {
+ return fmt.Errorf("invalid unicode literal")
+ }
+ }
+
+ // decode the digits into a rune
+ r, err := strconv.ParseInt(string(d), 16, 0)
+ if err != nil {
+ return err
+ }
+
+ l.appendRune(rune(r))
+ return nil
+}
+
+// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character.
+func decodeEscapedCharacter(r rune) rune {
+ switch r {
+ case 'f':
+ return '\f'
+ case 'n':
+ return '\n'
+ case 'r':
+ return '\r'
+ case 't':
+ return '\t'
+ default:
+ return r
+ }
+}
+
+// atUnicodeLiteral reports whether we are at a unicode literal.
+// The escape character has already been consumed.
+func atUnicodeLiteral(r rune) bool {
+ return r == 'u'
+}
+
+// isComment reports whether we are at the start of a comment.
+func isComment(r rune) bool {
+ return r == '#' || r == '!'
+}
+
+// isEndOfKey reports whether the rune terminates the current key.
+func isEndOfKey(r rune) bool {
+ return strings.ContainsRune(" \f\t\r\n:=", r)
+}
+
+// isEOF reports whether we are at EOF.
+func isEOF(r rune) bool {
+ return r == eof
+}
+
+// isEOL reports whether we are at a new line character.
+func isEOL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+// isEscape reports whether the rune is the escape character which
+// prefixes unicode literals and other escaped characters.
+func isEscape(r rune) bool {
+ return r == '\\'
+}
+
+// isEscapedCharacter reports whether we are at one of the characters that need escaping.
+// The escape character has already been consumed.
+func isEscapedCharacter(r rune) bool {
+ return strings.ContainsRune(" :=fnrt", r)
+}
+
+// isWhitespace reports whether the rune is a whitespace character.
+func isWhitespace(r rune) bool {
+ return strings.ContainsRune(whitespace, r)
+}
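
The lexer above follows the channel-plus-state-function pattern from text/template: lex starts a goroutine that walks the state functions and publishes tokens on a channel, and nextItem pulls them off one at a time. A rough sketch of inspecting the token stream; lex, item and itemType are unexported, so this is assumed to live in a package-internal test, and the input string is hypothetical:

    package properties

    import (
        "fmt"
        "testing"
    )

    // TestDumpItems is an illustrative sketch, not part of the library.
    func TestDumpItems(t *testing.T) {
        l := lex("# endpoint for the exporter\nurl = localhost:4317\n")
        for {
            it := l.nextItem()
            if it.typ == itemEOF || it.typ == itemError {
                break
            }
            fmt.Printf("%v %q\n", it.typ, it.val)
        }
    }
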
diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go
new file mode 100644
index 0000000000..635368dc8a
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/load.go
@@ -0,0 +1,293 @@
+// Copyright 2013-2022 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+)
+
+// Encoding specifies encoding of the input data.
+type Encoding uint
+
+const (
+ // utf8Default is a private placeholder for the zero value of Encoding to
+ // ensure that it has the correct meaning. UTF8 is the default encoding but
+ // was assigned a non-zero value which cannot be changed without breaking
+ // existing code. Clients should continue to use the public constants.
+ utf8Default Encoding = iota
+
+ // UTF8 interprets the input data as UTF-8.
+ UTF8
+
+ // ISO_8859_1 interprets the input data as ISO-8859-1.
+ ISO_8859_1
+)
+
+type Loader struct {
+ // Encoding determines how the data from files and byte buffers
+ // is interpreted. For URLs the Content-Type header is used
+ // to determine the encoding of the data.
+ Encoding Encoding
+
+ // DisableExpansion configures the property expansion of the
+ // returned property object. When set to true, the property values
+ // will not be expanded and the Property object will not be checked
+ // for invalid expansion expressions.
+ DisableExpansion bool
+
+ // IgnoreMissing configures whether missing files or URLs which return
+ // 404 are reported as errors. When set to true, missing files and 404
+ // status codes are not reported as errors.
+ IgnoreMissing bool
+}
+
+// LoadBytes reads a buffer into a Properties struct.
+func (l *Loader) LoadBytes(buf []byte) (*Properties, error) {
+ return l.loadBytes(buf, l.Encoding)
+}
+
+// LoadAll reads the content of multiple URLs or files in the given order into
+// a Properties struct. If IgnoreMissing is true then a 404 status code or
+// missing file will not be reported as an error. Encoding sets the encoding
+// for files. For URLs, see LoadURL for the Content-Type header and the
+// encoding.
+func (l *Loader) LoadAll(names []string) (*Properties, error) {
+ all := NewProperties()
+ for _, name := range names {
+ n, err := expandName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ var p *Properties
+ switch {
+ case strings.HasPrefix(n, "http://"):
+ p, err = l.LoadURL(n)
+ case strings.HasPrefix(n, "https://"):
+ p, err = l.LoadURL(n)
+ default:
+ p, err = l.LoadFile(n)
+ }
+ if err != nil {
+ return nil, err
+ }
+ all.Merge(p)
+ }
+
+ all.DisableExpansion = l.DisableExpansion
+ if all.DisableExpansion {
+ return all, nil
+ }
+ return all, all.check()
+}
+
+// LoadFile reads a file into a Properties struct.
+// If IgnoreMissing is true then a missing file will not be
+// reported as an error.
+func (l *Loader) LoadFile(filename string) (*Properties, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ if l.IgnoreMissing && os.IsNotExist(err) {
+ LogPrintf("properties: %s not found. skipping", filename)
+ return NewProperties(), nil
+ }
+ return nil, err
+ }
+ return l.loadBytes(data, l.Encoding)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+//
+// The encoding is determined via the Content-Type header which
+// should be set to 'text/plain'. If the 'charset' parameter is
+// missing, or is 'iso-8859-1' or 'latin1', the encoding is set to
+// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the
+// encoding is set to UTF-8. A missing content type header is
+// interpreted as 'text/plain; charset=utf-8'.
+func (l *Loader) LoadURL(url string) (*Properties, error) {
+ resp, err := http.Get(url)
+ if err != nil {
+ return nil, fmt.Errorf("properties: error fetching %q. %s", url, err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == 404 && l.IgnoreMissing {
+ LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode)
+ return NewProperties(), nil
+ }
+
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode)
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
+ }
+
+ ct := resp.Header.Get("Content-Type")
+ ct = strings.Join(strings.Fields(ct), "")
+ var enc Encoding
+ switch strings.ToLower(ct) {
+ case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1":
+ enc = ISO_8859_1
+ case "", "text/plain;charset=utf-8":
+ enc = UTF8
+ default:
+ return nil, fmt.Errorf("properties: invalid content type %s", ct)
+ }
+
+ return l.loadBytes(body, enc)
+}
+
+func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) {
+ p, err := parse(convert(buf, enc))
+ if err != nil {
+ return nil, err
+ }
+ p.DisableExpansion = l.DisableExpansion
+ if p.DisableExpansion {
+ return p, nil
+ }
+ return p, p.check()
+}
+
+// Load reads a buffer into a Properties struct.
+func Load(buf []byte, enc Encoding) (*Properties, error) {
+ l := &Loader{Encoding: enc}
+ return l.LoadBytes(buf)
+}
+
+// LoadString reads a UTF8 string into a Properties struct.
+func LoadString(s string) (*Properties, error) {
+ l := &Loader{Encoding: UTF8}
+ return l.LoadBytes([]byte(s))
+}
+
+// LoadMap creates a new Properties struct from a string map.
+func LoadMap(m map[string]string) *Properties {
+ p := NewProperties()
+ for k, v := range m {
+ p.Set(k, v)
+ }
+ return p
+}
+
+// LoadFile reads a file into a Properties struct.
+func LoadFile(filename string, enc Encoding) (*Properties, error) {
+ l := &Loader{Encoding: enc}
+ return l.LoadAll([]string{filename})
+}
+
+// LoadFiles reads multiple files in the given order into
+// a Properties struct. If 'ignoreMissing' is true then
+// non-existent files will not be reported as an error.
+func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+ l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
+ return l.LoadAll(filenames)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+// See Loader#LoadURL for details.
+func LoadURL(url string) (*Properties, error) {
+ l := &Loader{Encoding: UTF8}
+ return l.LoadAll([]string{url})
+}
+
+// LoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct. If IgnoreMissing is true then a 404 status code will
+// not be reported as an error. See Loader#LoadURL for the Content-Type header
+// and the encoding.
+func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
+ l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing}
+ return l.LoadAll(urls)
+}
+
+// LoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
+// not be reported as an error. Encoding sets the encoding for files. For URLs, see
+// LoadURL for the Content-Type header and the encoding.
+func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+ l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
+ return l.LoadAll(names)
+}
+
+// MustLoadString reads a UTF8 string into a Properties struct and
+// panics on error.
+func MustLoadString(s string) *Properties {
+ return must(LoadString(s))
+}
+
+// MustLoadFile reads a file into a Properties struct and
+// panics on error.
+func MustLoadFile(filename string, enc Encoding) *Properties {
+ return must(LoadFile(filename, enc))
+}
+
+// MustLoadFiles reads multiple files in the given order into
+// a Properties struct and panics on error. If 'ignoreMissing'
+// is true then non-existent files will not be reported as an error.
+func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties {
+ return must(LoadFiles(filenames, enc, ignoreMissing))
+}
+
+// MustLoadURL reads the content of a URL into a Properties struct and
+// panics on error.
+func MustLoadURL(url string) *Properties {
+ return must(LoadURL(url))
+}
+
+// MustLoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct and panics on error. If 'ignoreMissing' is true then a 404
+// status code will not be reported as an error.
+func MustLoadURLs(urls []string, ignoreMissing bool) *Properties {
+ return must(LoadURLs(urls, ignoreMissing))
+}
+
+// MustLoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
+// not be reported as an error. Encoding sets the encoding for files. For URLs, see
+// LoadURL for the Content-Type header and the encoding. It panics on error.
+func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties {
+ return must(LoadAll(names, enc, ignoreMissing))
+}
+
+func must(p *Properties, err error) *Properties {
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return p
+}
+
+// expandName expands ${ENV_VAR} expressions in a name.
+// If the environment variable does not exist then it will be replaced
+// with an empty string. Malformed expressions like "${ENV_VAR" will
+// be reported as an error.
+func expandName(name string) (string, error) {
+ return expand(name, []string{}, "${", "}", make(map[string]string))
+}
+
+// convert interprets a byte buffer as either an ISO-8859-1 or a UTF-8 encoded string.
+// For ISO-8859-1 we can convert each byte straight into a rune since the
+// first 256 unicode code points cover ISO-8859-1.
+func convert(buf []byte, enc Encoding) string {
+ switch enc {
+ case utf8Default, UTF8:
+ return string(buf)
+ case ISO_8859_1:
+ runes := make([]rune, len(buf))
+ for i, b := range buf {
+ runes[i] = rune(b)
+ }
+ return string(runes)
+ default:
+ ErrorHandler(fmt.Errorf("unsupported encoding %v", enc))
+ }
+ panic("ErrorHandler should exit")
+}
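
A short, hypothetical sketch of the Loader defined above: sources are merged in order, and with IgnoreMissing set, absent files and 404 responses are skipped rather than treated as errors (the file names and URL are made up):

    package main

    import (
        "fmt"
        "log"

        "github.com/magiconair/properties"
    )

    func main() {
        l := &properties.Loader{
            Encoding:      properties.UTF8,
            IgnoreMissing: true, // missing files and 404s are skipped
        }

        // Hypothetical sources; ${HOME} is expanded and later entries win on merge.
        p, err := l.LoadAll([]string{
            "defaults.properties",
            "${HOME}/.app.properties",
            "https://example.com/app.properties",
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(p.GetString("log.level", "info"))
    }
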
diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go
new file mode 100644
index 0000000000..fccfd39f6b
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/parser.go
@@ -0,0 +1,86 @@
+// Copyright 2013-2022 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "runtime"
+)
+
+type parser struct {
+ lex *lexer
+}
+
+func parse(input string) (properties *Properties, err error) {
+ p := &parser{lex: lex(input)}
+ defer p.recover(&err)
+
+ properties = NewProperties()
+ key := ""
+ comments := []string{}
+
+ for {
+ token := p.expectOneOf(itemComment, itemKey, itemEOF)
+ switch token.typ {
+ case itemEOF:
+ goto done
+ case itemComment:
+ comments = append(comments, token.val)
+ continue
+ case itemKey:
+ key = token.val
+ if _, ok := properties.m[key]; !ok {
+ properties.k = append(properties.k, key)
+ }
+ }
+
+ token = p.expectOneOf(itemValue, itemEOF)
+ if len(comments) > 0 {
+ properties.c[key] = comments
+ comments = []string{}
+ }
+ switch token.typ {
+ case itemEOF:
+ properties.m[key] = ""
+ goto done
+ case itemValue:
+ properties.m[key] = token.val
+ }
+ }
+
+done:
+ return properties, nil
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+ format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format)
+ panic(fmt.Errorf(format, args...))
+}
+
+func (p *parser) expectOneOf(expected ...itemType) (token item) {
+ token = p.lex.nextItem()
+ for _, v := range expected {
+ if token.typ == v {
+ return token
+ }
+ }
+ p.unexpected(token)
+ panic("unexpected token")
+}
+
+func (p *parser) unexpected(token item) {
+ p.errorf(token.String())
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (p *parser) recover(errp *error) {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(runtime.Error); ok {
+ panic(e)
+ }
+ *errp = e.(error)
+ }
+}
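
The parse loop above collects comment tokens and attaches them to the next key it sees. A small, hypothetical sketch of that behaviour through the public API:

    package main

    import (
        "fmt"

        "github.com/magiconair/properties"
    )

    func main() {
        input := "# primary endpoint\n! used by the exporter\nurl = localhost:4317\n"
        p := properties.MustLoadString(input)

        fmt.Println(p.GetComments("url")) // [primary endpoint used by the exporter]
        fmt.Println(p.GetComment("url"))  // last comment: "used by the exporter"
    }
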
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
new file mode 100644
index 0000000000..fb2f7b4048
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -0,0 +1,848 @@
+// Copyright 2013-2022 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer.
+// BUG(frank): Write() does not allow configuring the newline character. Therefore, on Windows LF is used.
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+const maxExpansionDepth = 64
+
+// ErrorHandlerFunc defines the type of function which handles failures
+// of the MustXXX() functions. An error handler function must exit
+// the application after handling the error.
+type ErrorHandlerFunc func(error)
+
+// ErrorHandler is the function which handles failures of the MustXXX()
+// functions. The default is LogFatalHandler.
+var ErrorHandler ErrorHandlerFunc = LogFatalHandler
+
+// LogHandlerFunc defines the function prototype for logging errors.
+type LogHandlerFunc func(fmt string, args ...interface{})
+
+// LogPrintf defines a log handler which uses log.Printf.
+var LogPrintf LogHandlerFunc = log.Printf
+
+// LogFatalHandler handles the error by logging a fatal error and exiting.
+func LogFatalHandler(err error) {
+ log.Fatal(err)
+}
+
+// PanicHandler handles the error by panicking.
+func PanicHandler(err error) {
+ panic(err)
+}
+
+// -----------------------------------------------------------------------------
+
+// A Properties contains the key/value pairs from the properties input.
+// All values are stored in unexpanded form and are expanded at runtime.
+type Properties struct {
+ // Pre-/Postfix for property expansion.
+ Prefix string
+ Postfix string
+
+ // DisableExpansion controls the expansion of properties on Get()
+ // and the check for circular references on Set(). When set to
+ // true Properties behaves like a simple key/value store and does
+ // not check for circular references on Get() or on Set().
+ DisableExpansion bool
+
+ // Stores the key/value pairs
+ m map[string]string
+
+ // Stores the comments per key.
+ c map[string][]string
+
+ // Stores the keys in order of appearance.
+ k []string
+
+ // WriteSeparator specifies the separator of key and value while writing the properties.
+ WriteSeparator string
+}
+
+// NewProperties creates a new Properties struct with the default
+// configuration for "${key}" expressions.
+func NewProperties() *Properties {
+ return &Properties{
+ Prefix: "${",
+ Postfix: "}",
+ m: map[string]string{},
+ c: map[string][]string{},
+ k: []string{},
+ }
+}
+
+// Load reads a buffer into the given Properties struct.
+func (p *Properties) Load(buf []byte, enc Encoding) error {
+ l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion}
+ newProperties, err := l.LoadBytes(buf)
+ if err != nil {
+ return err
+ }
+ p.Merge(newProperties)
+ return nil
+}
+
+// Get returns the expanded value for the given key if it exists.
+// Otherwise, ok is false.
+func (p *Properties) Get(key string) (value string, ok bool) {
+ v, ok := p.m[key]
+ if p.DisableExpansion {
+ return v, ok
+ }
+ if !ok {
+ return "", false
+ }
+
+ expanded, err := p.expand(key, v)
+
+ // we guarantee that the expanded value is free of
+ // circular references and malformed expressions
+ // so we panic if we still get an error here.
+ if err != nil {
+ ErrorHandler(err)
+ }
+
+ return expanded, true
+}
+
+// MustGet returns the expanded value for the given key if it exists.
+// Otherwise, it panics.
+func (p *Properties) MustGet(key string) string {
+ if v, ok := p.Get(key); ok {
+ return v
+ }
+ ErrorHandler(invalidKeyError(key))
+ panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// ClearComments removes the comments for all keys.
+func (p *Properties) ClearComments() {
+ p.c = map[string][]string{}
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComment returns the last comment before the given key or an empty string.
+func (p *Properties) GetComment(key string) string {
+ comments, ok := p.c[key]
+ if !ok || len(comments) == 0 {
+ return ""
+ }
+ return comments[len(comments)-1]
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComments returns all comments that appeared before the given key or nil.
+func (p *Properties) GetComments(key string) []string {
+ if comments, ok := p.c[key]; ok {
+ return comments
+ }
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComment sets the comment for the key.
+func (p *Properties) SetComment(key, comment string) {
+ p.c[key] = []string{comment}
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComments sets the comments for the key. If the comments are nil then
+// all comments for this key are deleted.
+func (p *Properties) SetComments(key string, comments []string) {
+ if comments == nil {
+ delete(p.c, key)
+ return
+ }
+ p.c[key] = comments
+}
+
+// ----------------------------------------------------------------------------
+
+// GetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the default value is returned.
+func (p *Properties) GetBool(key string, def bool) bool {
+ v, err := p.getBool(key)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the function panics.
+func (p *Properties) MustGetBool(key string) bool {
+ v, err := p.getBool(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+func (p *Properties) getBool(key string) (value bool, err error) {
+ if v, ok := p.Get(key); ok {
+ return boolVal(v), nil
+ }
+ return false, invalidKeyError(key)
+}
+
+func boolVal(v string) bool {
+ v = strings.ToLower(v)
+ return v == "1" || v == "true" || v == "yes" || v == "on"
+}
+
+// ----------------------------------------------------------------------------
+
+// GetDuration parses the expanded value as a time.Duration (in ns) if the
+// key exists. If key does not exist or the value cannot be parsed the default
+// value is returned. In almost all cases you want to use GetParsedDuration().
+func (p *Properties) GetDuration(key string, def time.Duration) time.Duration {
+ v, err := p.getInt64(key)
+ if err != nil {
+ return def
+ }
+ return time.Duration(v)
+}
+
+// MustGetDuration parses the expanded value as a time.Duration (in ns) if
+// the key exists. If key does not exist or the value cannot be parsed the
+// function panics. In almost all cases you want to use MustGetParsedDuration().
+func (p *Properties) MustGetDuration(key string) time.Duration {
+ v, err := p.getInt64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return time.Duration(v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration {
+ s, ok := p.Get(key)
+ if !ok {
+ return def
+ }
+ v, err := time.ParseDuration(s)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetParsedDuration(key string) time.Duration {
+ s, ok := p.Get(key)
+ if !ok {
+ ErrorHandler(invalidKeyError(key))
+ }
+ v, err := time.ParseDuration(s)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+// ----------------------------------------------------------------------------
+
+// GetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetFloat64(key string, def float64) float64 {
+ v, err := p.getFloat64(key)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetFloat64(key string) float64 {
+ v, err := p.getFloat64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+func (p *Properties) getFloat64(key string) (value float64, err error) {
+ if v, ok := p.Get(key); ok {
+ value, err = strconv.ParseFloat(v, 64)
+ if err != nil {
+ return 0, err
+ }
+ return value, nil
+ }
+ return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into an int the
+// function panics with an out of range error.
+func (p *Properties) GetInt(key string, def int) int {
+ v, err := p.getInt64(key)
+ if err != nil {
+ return def
+ }
+ return intRangeCheck(key, v)
+}
+
+// MustGetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into an int the function panics with
+// an out of range error.
+func (p *Properties) MustGetInt(key string) int {
+ v, err := p.getInt64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return intRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetInt64(key string, def int64) int64 {
+ v, err := p.getInt64(key)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetInt64(key string) int64 {
+ v, err := p.getInt64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+func (p *Properties) getInt64(key string) (value int64, err error) {
+ if v, ok := p.Get(key); ok {
+ value, err = strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return value, nil
+ }
+ return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into a uint the
+// function panics with an out of range error.
+func (p *Properties) GetUint(key string, def uint) uint {
+ v, err := p.getUint64(key)
+ if err != nil {
+ return def
+ }
+ return uintRangeCheck(key, v)
+}
+
+// MustGetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into a uint the function panics with
+// an out of range error.
+func (p *Properties) MustGetUint(key string) uint {
+ v, err := p.getUint64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return uintRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetUint64(key string, def uint64) uint64 {
+ v, err := p.getUint64(key)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
+// MustGetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetUint64(key string) uint64 {
+ v, err := p.getUint64(key)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return v
+}
+
+func (p *Properties) getUint64(key string) (value uint64, err error) {
+ if v, ok := p.Get(key); ok {
+ value, err = strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return value, nil
+ }
+ return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetString returns the expanded value for the given key if it exists or
+// the default value otherwise.
+func (p *Properties) GetString(key, def string) string {
+ if v, ok := p.Get(key); ok {
+ return v
+ }
+ return def
+}
+
+// MustGetString returns the expanded value for the given key if it exists or
+// panics otherwise.
+func (p *Properties) MustGetString(key string) string {
+ if v, ok := p.Get(key); ok {
+ return v
+ }
+ ErrorHandler(invalidKeyError(key))
+ panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// Filter returns a new properties object which contains all properties
+// for which the key matches the pattern.
+func (p *Properties) Filter(pattern string) (*Properties, error) {
+ re, err := regexp.Compile(pattern)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.FilterRegexp(re), nil
+}
+
+// FilterRegexp returns a new properties object which contains all properties
+// for which the key matches the regular expression.
+func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
+ pp := NewProperties()
+ for _, k := range p.k {
+ if re.MatchString(k) {
+ // TODO(fs): we are ignoring the error which flags a circular reference.
+ // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+ pp.Set(k, p.m[k])
+ }
+ }
+ return pp
+}
+
+// FilterPrefix returns a new properties object with a subset of all keys
+// with the given prefix.
+func (p *Properties) FilterPrefix(prefix string) *Properties {
+ pp := NewProperties()
+ for _, k := range p.k {
+ if strings.HasPrefix(k, prefix) {
+ // TODO(fs): we are ignoring the error which flags a circular reference.
+ // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+ pp.Set(k, p.m[k])
+ }
+ }
+ return pp
+}
+
+// FilterStripPrefix returns a new properties object with a subset of all keys
+// with the given prefix and the prefix removed from the keys.
+func (p *Properties) FilterStripPrefix(prefix string) *Properties {
+ pp := NewProperties()
+ n := len(prefix)
+ for _, k := range p.k {
+ if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
+ // TODO(fs): we are ignoring the error which flags a circular reference.
+ // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference
+ // TODO(fs): this function should probably return an error but the signature is fixed
+ pp.Set(k[n:], p.m[k])
+ }
+ }
+ return pp
+}
+
+// Len returns the number of keys.
+func (p *Properties) Len() int {
+ return len(p.m)
+}
+
+// Keys returns all keys in the same order as in the input.
+func (p *Properties) Keys() []string {
+ keys := make([]string, len(p.k))
+ copy(keys, p.k)
+ return keys
+}
+
+// Set sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. If the value contains a
+// circular reference or a malformed expression then
+// an error is returned.
+// An empty key is silently ignored.
+func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
+ if key == "" {
+ return "", false, nil
+ }
+
+ // if expansion is disabled we allow circular references
+ if p.DisableExpansion {
+ prev, ok = p.Get(key)
+ p.m[key] = value
+ if !ok {
+ p.k = append(p.k, key)
+ }
+ return prev, ok, nil
+ }
+
+ // to check for a circular reference we temporarily need
+ // to set the new value. If there is an error then revert
+ // to the previous state. Only if all tests are successful
+ // then we add the key to the p.k list.
+ prev, ok = p.Get(key)
+ p.m[key] = value
+
+ // now check for a circular reference
+ _, err = p.expand(key, value)
+ if err != nil {
+
+ // revert to the previous state
+ if ok {
+ p.m[key] = prev
+ } else {
+ delete(p.m, key)
+ }
+
+ return "", false, err
+ }
+
+ if !ok {
+ p.k = append(p.k, key)
+ }
+
+ return prev, ok, nil
+}
+
+// SetValue sets property key to the default string value
+// as defined by fmt.Sprintf("%v").
+func (p *Properties) SetValue(key string, value interface{}) error {
+ _, _, err := p.Set(key, fmt.Sprintf("%v", value))
+ return err
+}
+
+// MustSet sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. An empty key is silently ignored.
+func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
+ prev, ok, err := p.Set(key, value)
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return prev, ok
+}
+
+// String returns a string of all expanded 'key = value' pairs.
+func (p *Properties) String() string {
+ var s string
+ for _, key := range p.k {
+ value, _ := p.Get(key)
+ s = fmt.Sprintf("%s%s = %s\n", s, key, value)
+ }
+ return s
+}
+
+// Sort sorts the properties keys in alphabetical order.
+// This is helpful before writing the properties.
+func (p *Properties) Sort() {
+ sort.Strings(p.k)
+}
+
+// Write writes all unexpanded 'key = value' pairs to the given writer.
+// Write returns the number of bytes written and any write error encountered.
+func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
+ return p.WriteComment(w, "", enc)
+}
+
+// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
+// If prefix is not empty then comments are written with a blank line and the
+// given prefix. The prefix should be either "# " or "! " to be compatible with
+// the properties file format. Otherwise, the properties parser will not be
+// able to read the file back in. It returns the number of bytes written and
+// any write error encountered.
+func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
+ var x int
+
+ for _, key := range p.k {
+ value := p.m[key]
+
+ if prefix != "" {
+ if comments, ok := p.c[key]; ok {
+ // don't print comments if they are all empty
+ allEmpty := true
+ for _, c := range comments {
+ if c != "" {
+ allEmpty = false
+ break
+ }
+ }
+
+ if !allEmpty {
+ // add a blank line between entries but not at the top
+ if len(comments) > 0 && n > 0 {
+ x, err = fmt.Fprintln(w)
+ if err != nil {
+ return
+ }
+ n += x
+ }
+
+ for _, c := range comments {
+ x, err = fmt.Fprintf(w, "%s%s\n", prefix, c)
+ if err != nil {
+ return
+ }
+ n += x
+ }
+ }
+ }
+ }
+ sep := " = "
+ if p.WriteSeparator != "" {
+ sep = p.WriteSeparator
+ }
+ x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc))
+ if err != nil {
+ return
+ }
+ n += x
+ }
+ return
+}
+
+// Map returns a copy of the properties as a map.
+func (p *Properties) Map() map[string]string {
+ m := make(map[string]string)
+ for k, v := range p.m {
+ m[k] = v
+ }
+ return m
+}
+
+// FilterFunc returns a copy of the properties which includes the values which passed all filters.
+func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
+ pp := NewProperties()
+outer:
+ for k, v := range p.m {
+ for _, f := range filters {
+ if !f(k, v) {
+ continue outer
+ }
+ pp.Set(k, v)
+ }
+ }
+ return pp
+}
+
+// ----------------------------------------------------------------------------
+
+// Delete removes the key and its comments.
+func (p *Properties) Delete(key string) {
+ delete(p.m, key)
+ delete(p.c, key)
+ newKeys := []string{}
+ for _, k := range p.k {
+ if k != key {
+ newKeys = append(newKeys, k)
+ }
+ }
+ p.k = newKeys
+}
+
+// Merge merges properties, comments and keys from other *Properties into p.
+func (p *Properties) Merge(other *Properties) {
+ for _, k := range other.k {
+ if _, ok := p.m[k]; !ok {
+ p.k = append(p.k, k)
+ }
+ }
+ for k, v := range other.m {
+ p.m[k] = v
+ }
+ for k, v := range other.c {
+ p.c[k] = v
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+// check expands all values and returns an error if a circular reference or
+// a malformed expression was found.
+func (p *Properties) check() error {
+ for key, value := range p.m {
+ if _, err := p.expand(key, value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *Properties) expand(key, input string) (string, error) {
+ // no pre/postfix -> nothing to expand
+ if p.Prefix == "" && p.Postfix == "" {
+ return input, nil
+ }
+
+ return expand(input, []string{key}, p.Prefix, p.Postfix, p.m)
+}
+
+// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values.
+// The function keeps track of the keys that were already expanded and stops if it
+// detects a circular reference or a malformed expression of the form '(prefix)key'.
+func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) {
+ if len(keys) > maxExpansionDepth {
+ return "", fmt.Errorf("expansion too deep")
+ }
+
+ for {
+ start := strings.Index(s, prefix)
+ if start == -1 {
+ return s, nil
+ }
+
+ keyStart := start + len(prefix)
+ keyLen := strings.Index(s[keyStart:], postfix)
+ if keyLen == -1 {
+ return "", fmt.Errorf("malformed expression")
+ }
+
+ end := keyStart + keyLen + len(postfix) - 1
+ key := s[keyStart : keyStart+keyLen]
+
+ // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key)
+
+ for _, k := range keys {
+ if key == k {
+ var b bytes.Buffer
+ b.WriteString("circular reference in:\n")
+ for _, k1 := range keys {
+ fmt.Fprintf(&b, "%s=%s\n", k1, values[k1])
+ }
+ return "", fmt.Errorf(b.String())
+ }
+ }
+
+ val, ok := values[key]
+ if !ok {
+ val = os.Getenv(key)
+ }
+ new_val, err := expand(val, append(keys, key), prefix, postfix, values)
+ if err != nil {
+ return "", err
+ }
+ s = s[:start] + new_val + s[end+1:]
+ }
+}
+
+// encode escapes special characters in s and, for ISO-8859-1 output, encodes non-Latin-1 runes as unicode literals.
+func encode(s string, special string, enc Encoding) string {
+ switch enc {
+ case UTF8:
+ return encodeUtf8(s, special)
+ case ISO_8859_1:
+ return encodeIso(s, special)
+ default:
+ panic(fmt.Sprintf("unsupported encoding %v", enc))
+ }
+}
+
+func encodeUtf8(s string, special string) string {
+ v := ""
+ for pos := 0; pos < len(s); {
+ r, w := utf8.DecodeRuneInString(s[pos:])
+ pos += w
+ v += escape(r, special)
+ }
+ return v
+}
+
+func encodeIso(s string, special string) string {
+ var r rune
+ var w int
+ var v string
+ for pos := 0; pos < len(s); {
+ switch r, w = utf8.DecodeRuneInString(s[pos:]); {
+ case r < 1<<8: // single byte rune -> escape special chars only
+ v += escape(r, special)
+ case r < 1<<16: // two byte rune -> unicode literal
+ v += fmt.Sprintf("\\u%04x", r)
+ default: // more than two bytes per rune -> can't encode
+ v += "?"
+ }
+ pos += w
+ }
+ return v
+}
+
+func escape(r rune, special string) string {
+ switch r {
+ case '\f':
+ return "\\f"
+ case '\n':
+ return "\\n"
+ case '\r':
+ return "\\r"
+ case '\t':
+ return "\\t"
+ case '\\':
+ return "\\\\"
+ default:
+ if strings.ContainsRune(special, r) {
+ return "\\" + string(r)
+ }
+ return string(r)
+ }
+}
+
+func invalidKeyError(key string) error {
+ return fmt.Errorf("unknown property: %s", key)
+}
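
A compact, hypothetical sketch of the expansion and circular-reference handling implemented by Set and expand above (the key names are made up):

    package main

    import (
        "fmt"

        "github.com/magiconair/properties"
    )

    func main() {
        p := properties.NewProperties()

        // Values may reference other keys (or environment variables) via ${key}.
        p.MustSet("host", "localhost")
        p.MustSet("url", "http://${host}:4317")
        fmt.Println(p.MustGetString("url")) // http://localhost:4317

        // Set rejects circular references instead of expanding forever.
        p.MustSet("a", "${b}") // fine: b is not defined yet and expands to ""
        if _, _, err := p.Set("b", "${a}"); err != nil {
            fmt.Println(err) // circular reference in: ...
        }
    }
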
diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go
new file mode 100644
index 0000000000..dbd60b36e7
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/rangecheck.go
@@ -0,0 +1,31 @@
+// Copyright 2013-2022 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "math"
+)
+
+// make this a var to overwrite it in a test
+var is32Bit = ^uint(0) == math.MaxUint32
+
+// intRangeCheck checks if the value fits into the int type and
+// panics if it does not.
+func intRangeCheck(key string, v int64) int {
+ if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) {
+ panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+ }
+ return int(v)
+}
+
+// uintRangeCheck checks if the value fits into the uint type and
+// panics if it does not.
+func uintRangeCheck(key string, v uint64) uint {
+ if is32Bit && v > math.MaxUint32 {
+ panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+ }
+ return uint(v)
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
index d569c0c949..d0ea68f408 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -1,6 +1,7 @@
-//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
+//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo
// +build darwin freebsd openbsd netbsd dragonfly hurd
// +build !appengine
+// +build !tinygo
package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
index 31503226f6..7402e0618a 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_others.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -1,5 +1,6 @@
-//go:build appengine || js || nacl || wasm
-// +build appengine js nacl wasm
+//go:build (appengine || js || nacl || tinygo || wasm) && !windows
+// +build appengine js nacl tinygo wasm
+// +build !windows
package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
index 67787657fb..0337d8cf6d 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
@@ -1,6 +1,7 @@
-//go:build (linux || aix || zos) && !appengine
+//go:build (linux || aix || zos) && !appengine && !tinygo
// +build linux aix zos
// +build !appengine
+// +build !tinygo
package isatty
diff --git a/vendor/github.com/mohae/deepcopy/.gitignore b/vendor/github.com/mohae/deepcopy/.gitignore
new file mode 100644
index 0000000000..5846dd1531
--- /dev/null
+++ b/vendor/github.com/mohae/deepcopy/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*~
+*.out
+*.log
diff --git a/vendor/github.com/mohae/deepcopy/.travis.yml b/vendor/github.com/mohae/deepcopy/.travis.yml
new file mode 100644
index 0000000000..fd47a8cf78
--- /dev/null
+++ b/vendor/github.com/mohae/deepcopy/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+go:
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+script:
+ - go test ./...
diff --git a/vendor/github.com/mohae/deepcopy/LICENSE b/vendor/github.com/mohae/deepcopy/LICENSE
new file mode 100644
index 0000000000..419673f005
--- /dev/null
+++ b/vendor/github.com/mohae/deepcopy/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Joel
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mohae/deepcopy/README.md b/vendor/github.com/mohae/deepcopy/README.md
new file mode 100644
index 0000000000..f81841885b
--- /dev/null
+++ b/vendor/github.com/mohae/deepcopy/README.md
@@ -0,0 +1,8 @@
+deepCopy
+========
+[![GoDoc](https://godoc.org/github.com/mohae/deepcopy?status.svg)](https://godoc.org/github.com/mohae/deepcopy)[![Build Status](https://travis-ci.org/mohae/deepcopy.png)](https://travis-ci.org/mohae/deepcopy)
+
+DeepCopy makes deep copies of things: unexported field values are not copied.
+
+## Usage
+ cpy := deepcopy.Copy(orig)
diff --git a/vendor/github.com/mohae/deepcopy/deepcopy.go b/vendor/github.com/mohae/deepcopy/deepcopy.go
new file mode 100644
index 0000000000..ba763ad091
--- /dev/null
+++ b/vendor/github.com/mohae/deepcopy/deepcopy.go
@@ -0,0 +1,125 @@
+// deepcopy makes deep copies of things. A standard copy will copy the
+// pointers: deep copy copies the values pointed to. Unexported field
+// values are not copied.
+//
+// Copyright (c)2014-2016, Joel Scoble (github.com/mohae), all rights reserved.
+// License: MIT, for more details check the included LICENSE file.
+package deepcopy
+
+import (
+ "reflect"
+ "time"
+)
+
+// Interface for delegating the copy process to a type.
+type Interface interface {
+ DeepCopy() interface{}
+}
+
+// Iface is an alias to Copy; this exists for backwards compatibility reasons.
+func Iface(iface interface{}) interface{} {
+ return Copy(iface)
+}
+
+// Copy creates a deep copy of whatever is passed to it and returns the copy
+// in an interface{}. The returned value will need to be asserted to the
+// correct type.
+func Copy(src interface{}) interface{} {
+ if src == nil {
+ return nil
+ }
+
+ // Make the interface a reflect.Value
+ original := reflect.ValueOf(src)
+
+ // Make a copy of the same type as the original.
+ cpy := reflect.New(original.Type()).Elem()
+
+ // Recursively copy the original.
+ copyRecursive(original, cpy)
+
+ // Return the copy as an interface.
+ return cpy.Interface()
+}
+
+// copyRecursive does the actual copying of the interface. It currently has
+// limited support for what it can handle. Add as needed.
+func copyRecursive(original, cpy reflect.Value) {
+ // check for implement deepcopy.Interface
+ if original.CanInterface() {
+ if copier, ok := original.Interface().(Interface); ok {
+ cpy.Set(reflect.ValueOf(copier.DeepCopy()))
+ return
+ }
+ }
+
+ // handle according to original's Kind
+ switch original.Kind() {
+ case reflect.Ptr:
+ // Get the actual value being pointed to.
+ originalValue := original.Elem()
+
+ // if it isn't valid, return.
+ if !originalValue.IsValid() {
+ return
+ }
+ cpy.Set(reflect.New(originalValue.Type()))
+ copyRecursive(originalValue, cpy.Elem())
+
+ case reflect.Interface:
+ // If this is a nil, don't do anything
+ if original.IsNil() {
+ return
+ }
+ // Get the value for the interface, not the pointer.
+ originalValue := original.Elem()
+
+ // Get the value by calling Elem().
+ copyValue := reflect.New(originalValue.Type()).Elem()
+ copyRecursive(originalValue, copyValue)
+ cpy.Set(copyValue)
+
+ case reflect.Struct:
+ t, ok := original.Interface().(time.Time)
+ if ok {
+ cpy.Set(reflect.ValueOf(t))
+ return
+ }
+ // Go through each field of the struct and copy it.
+ for i := 0; i < original.NumField(); i++ {
+ // The Type's StructField for a given field is checked to see if StructField.PkgPath
+ // is set to determine if the field is exported or not because CanSet() returns false
+ // for settable fields. I'm not sure why. -mohae
+ if original.Type().Field(i).PkgPath != "" {
+ continue
+ }
+ copyRecursive(original.Field(i), cpy.Field(i))
+ }
+
+ case reflect.Slice:
+ if original.IsNil() {
+ return
+ }
+ // Make a new slice and copy each element.
+ cpy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap()))
+ for i := 0; i < original.Len(); i++ {
+ copyRecursive(original.Index(i), cpy.Index(i))
+ }
+
+ case reflect.Map:
+ if original.IsNil() {
+ return
+ }
+ cpy.Set(reflect.MakeMap(original.Type()))
+ for _, key := range original.MapKeys() {
+ originalValue := original.MapIndex(key)
+ copyValue := reflect.New(originalValue.Type()).Elem()
+ copyRecursive(originalValue, copyValue)
+ copyKey := Copy(key.Interface())
+ cpy.SetMapIndex(reflect.ValueOf(copyKey), copyValue)
+ }
+
+ default:
+ cpy.Set(original)
+ }
+}
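
deepcopy.Copy returns an interface{} holding a recursively copied value, so callers assert it back to the original type. A small, hypothetical sketch (the Config type is made up):

    package main

    import (
        "fmt"

        "github.com/mohae/deepcopy"
    )

    type Config struct {
        Name  string
        Tags  map[string]string
        Limit *int
    }

    func main() {
        n := 10
        orig := &Config{Name: "a", Tags: map[string]string{"env": "dev"}, Limit: &n}

        // The copy shares no pointers, maps or slices with the original.
        cpy := deepcopy.Copy(orig).(*Config)
        cpy.Tags["env"] = "prod"
        *cpy.Limit = 99

        fmt.Println(orig.Tags["env"], *orig.Limit) // dev 10
    }
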
diff --git a/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go b/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go
deleted file mode 100644
index e8737fe40e..0000000000
--- a/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2020 Mostyn Bramley-Moore.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package zstd is a wrapper for using github.com/klauspost/compress/zstd
-// with gRPC.
-package zstd
-
-import (
- "bytes"
- "errors"
- "io"
- "io/ioutil"
-
- "github.com/klauspost/compress/zstd"
- "google.golang.org/grpc/encoding"
-)
-
-const Name = "zstd"
-
-var encoderOptions = []zstd.EOption{
- // The default zstd window size is 8MB, which is much larger than the
- // typical RPC message and wastes a bunch of memory.
- zstd.WithWindowSize(512 * 1024),
-}
-
-type compressor struct {
- encoder *zstd.Encoder
- decoder *zstd.Decoder
-}
-
-func PretendInit(clobbering bool) {
- if !clobbering && encoding.GetCompressor(Name) != nil {
- return
- }
-
- enc, _ := zstd.NewWriter(nil, encoderOptions...)
- dec, _ := zstd.NewReader(nil)
- c := &compressor{
- encoder: enc,
- decoder: dec,
- }
- encoding.RegisterCompressor(c)
-}
-
-var ErrNotInUse = errors.New("SetLevel ineffective because another zstd compressor has been registered")
-
-// SetLevel updates the registered compressor to use a particular compression
-// level. NOTE: this function must only be called from an init function, and
-// is not threadsafe.
-func SetLevel(level zstd.EncoderLevel) error {
- c, ok := encoding.GetCompressor(Name).(*compressor)
- if !ok {
- return ErrNotInUse
- }
-
- enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level))
- if err != nil {
- return err
- }
-
- c.encoder = enc
- return nil
-}
-
-func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) {
- return &zstdWriteCloser{
- enc: c.encoder,
- writer: w,
- }, nil
-}
-
-type zstdWriteCloser struct {
- enc *zstd.Encoder
- writer io.Writer // Compressed data will be written here.
- buf bytes.Buffer // Buffer uncompressed data here, compress on Close.
-}
-
-func (z *zstdWriteCloser) Write(p []byte) (int, error) {
- return z.buf.Write(p)
-}
-
-func (z *zstdWriteCloser) Close() error {
- compressed := z.enc.EncodeAll(z.buf.Bytes(), nil)
- _, err := io.Copy(z.writer, bytes.NewReader(compressed))
- return err
-}
-
-func (c *compressor) Decompress(r io.Reader) (io.Reader, error) {
- compressed, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
-
- uncompressed, err := c.decoder.DecodeAll(compressed, nil)
- if err != nil {
- return nil, err
- }
-
- return bytes.NewReader(uncompressed), nil
-}
-
-func (c *compressor) Name() string {
- return Name
-}
diff --git a/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go b/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go
deleted file mode 100644
index 18b94d93fb..0000000000
--- a/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2023 Mostyn Bramley-Moore.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package github.com/mostynb/go-grpc-compression/nonclobbering/zstd is a
-// wrapper for using github.com/klauspost/compress/zstd with gRPC.
-//
-// If you import this package, it will only register itself as the encoder
-// for the "zstd" compressor if no other compressors have already been
-// registered with that name.
-//
-// If you do want to override previously registered "zstd" compressors,
-// then you should instead import
-// github.com/mostynb/go-grpc-compression/zstd
-package zstd
-
-import (
- internalzstd "github.com/mostynb/go-grpc-compression/internal/zstd"
-
- "github.com/klauspost/compress/zstd"
-)
-
-const Name = internalzstd.Name
-
-func init() {
- clobbering := false
- internalzstd.PretendInit(clobbering)
-}
-
-var ErrNotInUse = internalzstd.ErrNotInUse
-
-// SetLevel updates the registered compressor to use a particular compression
-// level. Returns ErrNotInUse if this module isn't registered (because it has
-// been overridden by another encoder with the same name), or any error
-// returned by zstd.NewWriter(nil, zstd.WithEncoderLevel(level).
-//
-// NOTE: this function is not threadsafe and must only be called from an init
-// function or from the main goroutine before any other goroutines have been
-// created.
-func SetLevel(level zstd.EncoderLevel) error {
- return internalzstd.SetLevel(level)
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider/provider.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider/provider.go
index b11f0cfc1b..437ac438e2 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider/provider.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider/provider.go
@@ -33,6 +33,26 @@ type provider struct {
client s3Client
}
+// NewFactory returns a new confmap.ProviderFactory that creates a confmap.Provider
+// which reads configuration from a file obtained from an s3 bucket.
+//
+// This Provider supports "s3" scheme, and can be called with a "uri" that follows:
+//
+// s3-uri : s3://[BUCKET].s3.[REGION].amazonaws.com/[KEY]
+//
+// An example s3-uri looks like: s3://doc-example-bucket.s3.us-west-2.amazonaws.com/photos/puppy.jpg
+// References: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
+//
+// Examples:
+// `s3://DOC-EXAMPLE-BUCKET.s3.us-west-2.amazonaws.com/photos/puppy.jpg` - (unix, windows)
+func NewFactory() confmap.ProviderFactory {
+ return confmap.NewProviderFactory(newWithSettings)
+}
+
+func newWithSettings(_ confmap.ProviderSettings) confmap.Provider {
+ return &provider{client: nil}
+}
+
// New returns a new confmap.Provider that reads the configuration from a file.
//
// This Provider supports "s3" scheme, and can be called with a "uri" that follows:
@@ -44,6 +64,8 @@ type provider struct {
//
// Examples:
// `s3://DOC-EXAMPLE-BUCKET.s3.us-west-2.amazonaws.com/photos/puppy.jpg` - (unix, windows)
+//
+// Deprecated: [v0.100.0] Use NewFactory() instead.
func New() confmap.Provider {
return &provider{client: nil}
}
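As a quick orientation for the new factory entry point, the sketch below resolves a configuration file through the s3 provider. The bucket, region, and key in the URI are placeholders, and the empty ProviderSettings value is an assumption about the confmap API at this collector version, so treat this as a sketch rather than a definitive usage.

package main

import (
	"context"
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provider/s3provider"
	"go.opentelemetry.io/collector/confmap"
)

func main() {
	// NewFactory supersedes the deprecated New(); Create builds the provider.
	provider := s3provider.NewFactory().Create(confmap.ProviderSettings{})

	// URI format: s3://[BUCKET].s3.[REGION].amazonaws.com/[KEY]
	uri := "s3://doc-example-bucket.s3.us-west-2.amazonaws.com/otel/config.yaml"

	retrieved, err := provider.Retrieve(context.Background(), uri, nil)
	if err != nil {
		panic(err)
	}

	conf, err := retrieved.AsConf()
	if err != nil {
		panic(err)
	}
	fmt.Println(conf.ToStringMap())
}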
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/internal/metadata/generated_status.go
index 2b54cdf296..93e8c75eb2 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
LogsStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/awscloudwatchlogs")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/awscloudwatchlogs")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..5e9b7bbe23
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/awscloudwatchlogs")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/awscloudwatchlogs")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/metadata.yaml
index 7b4422fa10..9a86f2927c 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter/metadata.yaml
@@ -18,3 +18,8 @@ tests:
retry_on_failure:
enabled: false
expect_consumer_error: true
+ goleak:
+ ignore:
+ top:
+ # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information.
+ - "go.opencensus.io/stats/view.(*worker).start"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/internal/metadata/generated_status.go
index 5d050d2761..8b4abca7c2 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
MetricsStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/awsemf")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/awsemf")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..c089739348
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/awsemf")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/awsemf")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/metadata.yaml
index 62292f087f..e1eefcb962 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter/metadata.yaml
@@ -14,4 +14,6 @@ tests:
region: 'us-west-2'
resource_to_telemetry_conversion:
enabled: true
- expect_consumer_error: true
\ No newline at end of file
+ expect_consumer_error: true
+ goleak:
+ skip: true
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/README.md
index 22f5794831..03faa6f6e1 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/README.md
@@ -23,13 +23,9 @@ Trace IDs and Span IDs are expected to be originally generated by either AWS API
propagated by them using the `X-Amzn-Trace-Id` HTTP header. However, other generation sources are
supported by replacing fully-random Trace IDs with X-Ray formatted Trace IDs where necessary:
-> AWS X-Ray IDs are the same size as W3C Trace Context IDs but differ in that the first 32 bits of a Trace ID
-> is the Unix epoch time when the trace was started. Note that X-Ray only allows submission of Trace IDs from
-> the past 30 days, otherwise the trace is dropped by X-Ray. The Exporter will not validate this timestamp.
-
-This means that until X-Ray supports Trace Ids consisting of fully random bits, in order for spans to appear in X-Ray, the client SDK MUST use an X-Ray ID generator. For more
-information, see
-[configuring the X-Ray exporter](https://aws-otel.github.io/docs/getting-started/x-ray#configuring-the-aws-x-ray-exporter).
+> AWS X-Ray IDs in binary are 128 bits, the same size as W3C Trace Context IDs, but the string form is
+> "1-{8 digit hex}-{24 digit hex}". For example, the W3C format trace ID "4bf92f3577b34da6a3ce929d0e0e4736" is
+> converted to the X-Ray format trace ID "1-4bf92f35-77b34da6a3ce929d0e0e4736".
The `http` object is populated when the `component` attribute value is `grpc` as well as `http`. Other
synchronous call types should also result in the `http` object being populated.
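The conversion described in the updated README is purely a string re-formatting of the 128-bit ID. A minimal sketch (not the exporter's internal implementation) of that mapping, using the example IDs from the README:

package main

import "fmt"

// toXRayTraceID reformats a 32-hex-character W3C trace ID into the
// X-Ray string form "1-{8 hex}-{24 hex}". Illustration only; the
// exporter performs this translation internally.
func toXRayTraceID(w3c string) (string, error) {
	if len(w3c) != 32 {
		return "", fmt.Errorf("expected 32 hex characters, got %d", len(w3c))
	}
	return fmt.Sprintf("1-%s-%s", w3c[:8], w3c[8:]), nil
}

func main() {
	id, _ := toXRayTraceID("4bf92f3577b34da6a3ce929d0e0e4736")
	fmt.Println(id) // 1-4bf92f35-77b34da6a3ce929d0e0e4736
}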
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/awsxray.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/awsxray.go
index 036903446a..f82bbb34bd 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/awsxray.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/awsxray.go
@@ -105,17 +105,21 @@ func extractResourceSpans(config component.Config, logger *zap.Logger, td ptrace
for j := 0; j < rspans.ScopeSpans().Len(); j++ {
spans := rspans.ScopeSpans().At(j).Spans()
for k := 0; k < spans.Len(); k++ {
- document, localErr := translator.MakeSegmentDocumentString(
+ documentsForSpan, localErr := translator.MakeSegmentDocuments(
spans.At(k), resource,
config.(*Config).IndexedAttributes,
config.(*Config).IndexAllAttributes,
config.(*Config).LogGroupNames,
config.(*Config).skipTimestampValidation)
+
if localErr != nil {
logger.Debug("Error translating span.", zap.Error(localErr))
continue
}
- documents = append(documents, &document)
+
+ for l := range documentsForSpan {
+ documents = append(documents, &documentsForSpan[l])
+ }
}
}
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/metadata/generated_status.go
index 63420257a6..e5d7462d56 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
TracesStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/awsxray")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/awsxray")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..4052418953
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/awsxray")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/awsxray")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator/segment.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator/segment.go
index 0259fd891b..9460f1ce5c 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator/segment.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator/segment.go
@@ -37,20 +37,27 @@ const (
// x-ray only span attributes - https://github.com/open-telemetry/opentelemetry-java-contrib/pull/802
const (
- awsLocalService = "aws.local.service"
- awsRemoteService = "aws.remote.service"
+ awsLocalService = "aws.local.service"
+ awsRemoteService = "aws.remote.service"
+ awsLocalOperation = "aws.local.operation"
+ awsRemoteOperation = "aws.remote.operation"
+ remoteTarget = "remoteTarget"
+ awsSpanKind = "aws.span.kind"
+ k8sRemoteNamespace = "K8s.RemoteNamespace"
)
var (
// reInvalidSpanCharacters defines the invalid letters in a span name as per
- // https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
- reInvalidSpanCharacters = regexp.MustCompile(`[^ 0-9\p{L}N_.:/%=+,\-@]`)
+ // Allowed characters for X-Ray Segment Name:
+ // Unicode letters, numbers, and whitespace, and the following symbols: _, ., :, /, %, &, #, =, +, \, -, @
+ // Doc: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
+ reInvalidSpanCharacters = regexp.MustCompile(`[^ 0-9\p{L}N_.:/%=+\-@]`)
)
var (
remoteXrayExporterDotConverter = featuregate.GlobalRegistry().MustRegister(
"exporter.xray.allowDot",
- featuregate.StageAlpha,
+ featuregate.StageBeta,
featuregate.WithRegisterDescription("X-Ray Exporter will no longer convert . to _ in annotation keys when this feature gate is enabled. "),
featuregate.WithRegisterFromVersion("v0.97.0"),
)
@@ -72,16 +79,233 @@ const (
identifierOffset = 11 // offset of identifier within traceID
)
+const (
+ localRoot = "LOCAL_ROOT"
+)
+
+var removeAnnotationsFromServiceSegment = []string{
+ awsRemoteService,
+ awsRemoteOperation,
+ remoteTarget,
+ k8sRemoteNamespace,
+}
+
var (
writers = newWriterPool(2048)
)
-// MakeSegmentDocumentString converts an OpenTelemetry Span to an X-Ray Segment and then serialzies to JSON
+// MakeSegmentDocuments converts a span into one or more JSON segment documents
+func MakeSegmentDocuments(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool, logGroupNames []string, skipTimestampValidation bool) ([]string, error) {
+ segments, err := MakeSegmentsFromSpan(span, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation)
+
+ if err == nil {
+ var documents []string
+
+ for _, v := range segments {
+ document, documentErr := MakeDocumentFromSegment(v)
+ if documentErr != nil {
+ return nil, documentErr
+ }
+
+ documents = append(documents, document)
+ }
+
+ return documents, nil
+ }
+
+ return nil, err
+}
+
+func isLocalRootSpanADependencySpan(span ptrace.Span) bool {
+ return span.Kind() != ptrace.SpanKindServer &&
+ span.Kind() != ptrace.SpanKindInternal
+}
+
+// isLocalRoot - we will move to using isRemote once the collector supports deserializing it. Until then, we will rely on aws.span.kind.
+func isLocalRoot(span ptrace.Span) bool {
+ if myAwsSpanKind, ok := span.Attributes().Get(awsSpanKind); ok {
+ return localRoot == myAwsSpanKind.Str()
+ }
+
+ return false
+}
+
+func addNamespaceToSubsegmentWithRemoteService(span ptrace.Span, segment *awsxray.Segment) {
+ if (span.Kind() == ptrace.SpanKindClient ||
+ span.Kind() == ptrace.SpanKindConsumer ||
+ span.Kind() == ptrace.SpanKindProducer) &&
+ segment.Type != nil &&
+ segment.Namespace == nil {
+ if _, ok := span.Attributes().Get(awsRemoteService); ok {
+ segment.Namespace = awsxray.String("remote")
+ }
+ }
+}
+
+func MakeDependencySubsegmentForLocalRootDependencySpan(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool, logGroupNames []string, skipTimestampValidation bool, serviceSegmentID pcommon.SpanID) (*awsxray.Segment, error) {
+ var dependencySpan = ptrace.NewSpan()
+ span.CopyTo(dependencySpan)
+
+ dependencySpan.SetParentSpanID(serviceSegmentID)
+
+ dependencySubsegment, err := MakeSegment(dependencySpan, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Make this a subsegment
+ dependencySubsegment.Type = awsxray.String("subsegment")
+
+ if dependencySubsegment.Namespace == nil {
+ dependencySubsegment.Namespace = awsxray.String("remote")
+ }
+
+ // Remove span links from consumer spans
+ if span.Kind() == ptrace.SpanKindConsumer {
+ dependencySubsegment.Links = nil
+ }
+
+ if myAwsRemoteService, ok := span.Attributes().Get(awsRemoteService); ok {
+ subsegmentName := myAwsRemoteService.Str()
+ dependencySubsegment.Name = awsxray.String(trimAwsSdkPrefix(subsegmentName, span))
+ }
+
+ return dependencySubsegment, err
+}
+
+func MakeServiceSegmentForLocalRootDependencySpan(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool, logGroupNames []string, skipTimestampValidation bool, serviceSegmentID pcommon.SpanID) (*awsxray.Segment, error) {
+ // We always create a segment for the service
+ var serviceSpan ptrace.Span = ptrace.NewSpan()
+ span.CopyTo(serviceSpan)
+
+ // Set the span id to the one internally generated
+ serviceSpan.SetSpanID(serviceSegmentID)
+
+ for _, v := range removeAnnotationsFromServiceSegment {
+ serviceSpan.Attributes().Remove(v)
+ }
+
+ serviceSegment, err := MakeSegment(serviceSpan, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Set the name
+ if myAwsLocalService, ok := span.Attributes().Get(awsLocalService); ok {
+ serviceSegment.Name = awsxray.String(myAwsLocalService.Str())
+ }
+
+ // Remove the HTTP field
+ serviceSegment.HTTP = nil
+
+ // Remove AWS subsegment fields
+ serviceSegment.AWS.Operation = nil
+ serviceSegment.AWS.AccountID = nil
+ serviceSegment.AWS.RemoteRegion = nil
+ serviceSegment.AWS.RequestID = nil
+ serviceSegment.AWS.QueueURL = nil
+ serviceSegment.AWS.TableName = nil
+ serviceSegment.AWS.TableNames = nil
+
+ // Delete all metadata that does not start with 'otel.resource.'
+ for _, metaDataEntry := range serviceSegment.Metadata {
+ for key := range metaDataEntry {
+ if !strings.HasPrefix(key, "otel.resource.") {
+ delete(metaDataEntry, key)
+ }
+ }
+ }
+
+ // Make it a segment
+ serviceSegment.Type = nil
+
+ // Remove the namespace
+ serviceSegment.Namespace = nil
+
+ // Remove span links from non-consumer spans
+ if span.Kind() != ptrace.SpanKindConsumer {
+ serviceSegment.Links = nil
+ }
+
+ return serviceSegment, nil
+}
+
+func MakeServiceSegmentForLocalRootSpanWithoutDependency(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool, logGroupNames []string, skipTimestampValidation bool) ([]*awsxray.Segment, error) {
+ segment, err := MakeSegment(span, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation)
+
+ if err != nil {
+ return nil, err
+ }
+
+ segment.Type = nil
+ segment.Namespace = nil
+
+ return []*awsxray.Segment{segment}, err
+}
+
+func MakeNonLocalRootSegment(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool, logGroupNames []string, skipTimestampValidation bool) ([]*awsxray.Segment, error) {
+ segment, err := MakeSegment(span, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation)
+
+ if err != nil {
+ return nil, err
+ }
+
+ addNamespaceToSubsegmentWithRemoteService(span, segment)
+
+ return []*awsxray.Segment{segment}, nil
+}
+
+func MakeServiceSegmentAndDependencySubsegment(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool, logGroupNames []string, skipTimestampValidation bool) ([]*awsxray.Segment, error) {
+ // If it is a local root span and a dependency span, we need to make a segment and subsegment representing the local service and remote service, respectively.
+ var serviceSegmentID = newSegmentID()
+ var segments []*awsxray.Segment
+
+ // Make Dependency Subsegment
+ dependencySubsegment, err := MakeDependencySubsegmentForLocalRootDependencySpan(span, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation, serviceSegmentID)
+ if err != nil {
+ return nil, err
+ }
+ segments = append(segments, dependencySubsegment)
+
+ // Make Service Segment
+ serviceSegment, err := MakeServiceSegmentForLocalRootDependencySpan(span, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation, serviceSegmentID)
+ if err != nil {
+ return nil, err
+ }
+ segments = append(segments, serviceSegment)
+
+ return segments, err
+}
+
+// MakeSegmentsFromSpan creates one or more segments from a span
+func MakeSegmentsFromSpan(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool, logGroupNames []string, skipTimestampValidation bool) ([]*awsxray.Segment, error) {
+ if !isLocalRoot(span) {
+ return MakeNonLocalRootSegment(span, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation)
+ }
+
+ if !isLocalRootSpanADependencySpan(span) {
+ return MakeServiceSegmentForLocalRootSpanWithoutDependency(span, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation)
+ }
+
+ return MakeServiceSegmentAndDependencySubsegment(span, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation)
+}
+
+// MakeSegmentDocumentString converts an OpenTelemetry Span to an X-Ray Segment and then serializes to JSON
+// MakeSegmentDocumentString will be deprecated in the future
func MakeSegmentDocumentString(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool, logGroupNames []string, skipTimestampValidation bool) (string, error) {
segment, err := MakeSegment(span, resource, indexedAttrs, indexAllAttrs, logGroupNames, skipTimestampValidation)
+
if err != nil {
return "", err
}
+
+ return MakeDocumentFromSegment(segment)
+}
+
+// MakeDocumentFromSegment converts a segment into a JSON document
+func MakeDocumentFromSegment(segment *awsxray.Segment) (string, error) {
w := writers.borrow()
if err := w.Encode(*segment); err != nil {
return "", err
@@ -142,18 +366,24 @@ func MakeSegment(span ptrace.Span, resource pcommon.Resource, indexedAttrs []str
// X-Ray segment names are service names, unlike span names which are methods. Try to find a service name.
// support x-ray specific service name attributes as segment name if it exists
- if span.Kind() == ptrace.SpanKindServer || span.Kind() == ptrace.SpanKindConsumer {
+ if span.Kind() == ptrace.SpanKindServer {
+ if localServiceName, ok := attributes.Get(awsLocalService); ok {
+ name = localServiceName.Str()
+ }
+ }
+
+ myAwsSpanKind, _ := span.Attributes().Get(awsSpanKind)
+ if span.Kind() == ptrace.SpanKindInternal && myAwsSpanKind.Str() == localRoot {
if localServiceName, ok := attributes.Get(awsLocalService); ok {
name = localServiceName.Str()
}
}
- if span.Kind() == ptrace.SpanKindClient || span.Kind() == ptrace.SpanKindProducer {
+
+ if span.Kind() == ptrace.SpanKindClient || span.Kind() == ptrace.SpanKindProducer || span.Kind() == ptrace.SpanKindConsumer {
if remoteServiceName, ok := attributes.Get(awsRemoteService); ok {
name = remoteServiceName.Str()
// only strip the prefix for AWS spans
- if isAwsSdkSpan(span) && strings.HasPrefix(name, "AWS.SDK.") {
- name = strings.TrimPrefix(name, "AWS.SDK.")
- }
+ name = trimAwsSdkPrefix(name, span)
}
}
@@ -188,7 +418,10 @@ func MakeSegment(span ptrace.Span, resource pcommon.Resource, indexedAttrs []str
// For database queries, the segment name convention is @
name = dbInstance.Str()
if dbURL, ok := attributes.Get(conventions.AttributeDBConnectionString); ok {
- if parsed, _ := url.Parse(dbURL.Str()); parsed != nil {
+ // Trim JDBC connection string if starts with "jdbc:", otherwise no change
+ // jdbc:mysql://db.dev.example.com:3306
+ dbURLStr := strings.TrimPrefix(dbURL.Str(), "jdbc:")
+ if parsed, _ := url.Parse(dbURLStr); parsed != nil {
if parsed.Hostname() != "" {
name += "@" + parsed.Hostname()
}
@@ -535,3 +768,10 @@ func fixAnnotationKey(key string) string {
}
}, key)
}
+
+func trimAwsSdkPrefix(name string, span ptrace.Span) string {
+ if isAwsSdkSpan(span) && strings.HasPrefix(name, "AWS.SDK.") {
+ return strings.TrimPrefix(name, "AWS.SDK.")
+ }
+ return name
+}
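To make the new routing in MakeSegmentsFromSpan easier to follow, the standalone sketch below mirrors (but does not reuse) its decision logic, using the same aws.span.kind / LOCAL_ROOT convention; the helper name is hypothetical and the return strings are only labels for the three cases.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

// describeSegmentsForSpan labels the exporter's routing decision for a span:
// local-root dependency spans become a service segment plus a remote
// subsegment, other local-root spans become a single service segment, and
// everything else stays a single segment or subsegment.
func describeSegmentsForSpan(span ptrace.Span) string {
	kindAttr, ok := span.Attributes().Get("aws.span.kind")
	isLocalRoot := ok && kindAttr.Str() == "LOCAL_ROOT"
	isDependency := span.Kind() != ptrace.SpanKindServer && span.Kind() != ptrace.SpanKindInternal

	switch {
	case isLocalRoot && isDependency:
		return "service segment + dependency subsegment"
	case isLocalRoot:
		return "service segment only"
	default:
		return "single segment/subsegment"
	}
}

func main() {
	span := ptrace.NewSpan()
	span.SetKind(ptrace.SpanKindClient)
	span.Attributes().PutStr("aws.span.kind", "LOCAL_ROOT")
	fmt.Println(describeSegmentsForSpan(span)) // service segment + dependency subsegment
}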
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/metadata.yaml
index 684c17f946..5a630856f0 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/metadata.yaml
@@ -12,4 +12,6 @@ status:
tests:
config:
region: 'us-west-2'
- expect_consumer_error: true
\ No newline at end of file
+ expect_consumer_error: true
+ goleak:
+ skip: true
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/README.md
index c4ae28bdec..5265fecd8f 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/README.md
@@ -7,7 +7,7 @@
| | [beta]: traces, metrics |
| Distributions | [contrib] |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fdatadog%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fdatadog) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fdatadog%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fdatadog) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@mx-psi](https://www.github.com/mx-psi), [@dineshg13](https://www.github.com/dineshg13), [@liustanley](https://www.github.com/liustanley), [@songy23](https://www.github.com/songy23), [@mackjmr](https://www.github.com/mackjmr) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@mx-psi](https://www.github.com/mx-psi), [@dineshg13](https://www.github.com/dineshg13), [@liustanley](https://www.github.com/liustanley), [@songy23](https://www.github.com/songy23), [@mackjmr](https://www.github.com/mackjmr), [@ankitpatel96](https://www.github.com/ankitpatel96) |
| Emeritus | [@gbbr](https://www.github.com/gbbr) |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/agent_components.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/agent_components.go
new file mode 100644
index 0000000000..7d81c193d0
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/agent_components.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package datadogexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter"
+
+import (
+ "strings"
+
+ coreconfig "github.com/DataDog/datadog-agent/comp/core/config"
+ "github.com/DataDog/datadog-agent/comp/core/log"
+ pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model"
+ pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup"
+ "go.opentelemetry.io/collector/component"
+)
+
+func newLogComponent(set component.TelemetrySettings) log.Component {
+ zlog := &zaplogger{
+ logger: set.Logger,
+ }
+ return zlog
+}
+
+func newConfigComponent(set component.TelemetrySettings, cfg *Config) coreconfig.Component {
+ pkgconfig := pkgconfigmodel.NewConfig("DD", "DD", strings.NewReplacer(".", "_"))
+
+ // Set the API Key
+ pkgconfig.Set("api_key", string(cfg.API.Key), pkgconfigmodel.SourceFile)
+ pkgconfig.Set("site", cfg.API.Site, pkgconfigmodel.SourceFile)
+ pkgconfig.Set("logs_enabled", true, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("log_level", set.Logger.Level().String(), pkgconfigmodel.SourceFile)
+ pkgconfig.Set("logs_config.batch_wait", cfg.Logs.BatchWait, pkgconfigmodel.SourceFile)
+ pkgconfig.Set("logs_config.use_compression", cfg.Logs.UseCompression, pkgconfigmodel.SourceFile)
+ pkgconfig.Set("logs_config.compression_level", cfg.Logs.CompressionLevel, pkgconfigmodel.SourceFile)
+ pkgconfig.Set("logs_config.logs_dd_url", cfg.Logs.TCPAddrConfig.Endpoint, pkgconfigmodel.SourceFile)
+ pkgconfig.Set("logs_config.auditor_ttl", pkgconfigsetup.DefaultAuditorTTL, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.batch_max_content_size", pkgconfigsetup.DefaultBatchMaxContentSize, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.batch_max_size", pkgconfigsetup.DefaultBatchMaxSize, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.input_chan_size", pkgconfigsetup.DefaultInputChanSize, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.max_message_size_bytes", pkgconfigsetup.DefaultMaxMessageSizeBytes, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.run_path", "/opt/datadog-agent/run", pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.sender_backoff_factor", pkgconfigsetup.DefaultLogsSenderBackoffFactor, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.sender_backoff_base", pkgconfigsetup.DefaultLogsSenderBackoffBase, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.sender_backoff_max", pkgconfigsetup.DefaultLogsSenderBackoffMax, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.sender_recovery_interval", pkgconfigsetup.DefaultForwarderRecoveryInterval, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.stop_grace_period", 30, pkgconfigmodel.SourceDefault)
+ pkgconfig.Set("logs_config.use_v2_api", true, pkgconfigmodel.SourceDefault)
+ pkgconfig.SetKnown("logs_config.dev_mode_no_ssl")
+
+ return pkgconfig
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/config.go
index 9ebd116980..631af88b4d 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/config.go
@@ -273,8 +273,17 @@ type TracesConfig struct {
// If set to true, enables an additional stats computation check on spans to see they have an eligible `span.kind` (server, consumer, client, producer).
// If enabled, a span with an eligible `span.kind` will have stats computed. If disabled, only top-level and measured spans will have stats computed.
// NOTE: For stats computed from OTel traces, only top-level spans are considered when this option is off.
+ // If you are sending OTel traces and want stats on non-top-level spans, this flag will need to be enabled.
+ // If you are sending OTel traces and do not want stats computed by span kind, you need to disable this flag and disable `compute_top_level_by_span_kind`.
ComputeStatsBySpanKind bool `mapstructure:"compute_stats_by_span_kind"`
+ // If set to true, root spans and spans with a server or consumer `span.kind` will be marked as top-level.
+ // Additionally, spans with a client or producer `span.kind` will have stats computed.
+ // Enabling this config option may increase the number of spans that generate trace metrics, and may change which spans appear as top-level in Datadog.
+ // ComputeTopLevelBySpanKind needs to be enabled in both the Datadog connector and Datadog exporter configs if both components are being used.
+ // The default value is `false`.
+ ComputeTopLevelBySpanKind bool `mapstructure:"compute_top_level_by_span_kind"`
+
// If set to true, enables `peer.service` aggregation in the exporter. If disabled, aggregated trace stats will not include `peer.service` as a dimension.
// For the best experience with `peer.service`, it is recommended to also enable `compute_stats_by_span_kind`.
// If enabling both causes the datadog exporter to consume too many resources, try disabling `compute_stats_by_span_kind` first.
@@ -314,7 +323,21 @@ type LogsConfig struct {
confignet.TCPAddrConfig `mapstructure:",squash"`
// DumpPayloads report whether payloads should be dumped when logging level is debug.
+ // Note: this config option does not apply when enabling the `exporter.datadogexporter.UseLogsAgentExporter` feature flag.
DumpPayloads bool `mapstructure:"dump_payloads"`
+
+ // UseCompression enables the logs agent to compress logs before sending them.
+ // Note: this config option does not apply unless enabling the `exporter.datadogexporter.UseLogsAgentExporter` feature flag.
+ UseCompression bool `mapstructure:"use_compression"`
+
+ // CompressionLevel accepts values from 0 (no compression) to 9 (maximum compression but higher resource usage).
+ // Only takes effect if UseCompression is set to true.
+ // Note: this config option does not apply unless enabling the `exporter.datadogexporter.UseLogsAgentExporter` feature flag.
+ CompressionLevel int `mapstructure:"compression_level"`
+
+ // BatchWait represents the maximum time the logs agent waits to fill each batch of logs before sending.
+ // Note: this config option does not apply unless enabling the `exporter.datadogexporter.UseLogsAgentExporter` feature flag.
+ BatchWait int `mapstructure:"batch_wait"`
}
// TagsConfig defines the tag-related configuration
@@ -626,5 +649,24 @@ func (c *Config) Unmarshal(configMap *confmap.Conf) error {
initialValueSetting, cumulMonoMode, CumulativeMonotonicSumModeToDelta)
}
+ logsExporterSettings := []struct {
+ setting string
+ valid bool
+ }{
+ {setting: "logs::dump_payloads", valid: !isLogsAgentExporterEnabled()},
+ {setting: "logs::use_compression", valid: isLogsAgentExporterEnabled()},
+ {setting: "logs::compression_level", valid: isLogsAgentExporterEnabled()},
+ {setting: "logs::batch_wait", valid: isLogsAgentExporterEnabled()},
+ }
+ for _, logsExporterSetting := range logsExporterSettings {
+ if configMap.IsSet(logsExporterSetting.setting) && !logsExporterSetting.valid {
+ enabledText := "enabled"
+ if !isLogsAgentExporterEnabled() {
+ enabledText = "disabled"
+ }
+ return fmt.Errorf("%v is not valid when the exporter.datadogexporter.UseLogsAgentExporter feature gate is %v", logsExporterSetting.setting, enabledText)
+ }
+ }
+
return nil
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/factory.go
index a630cfc924..816d73cb66 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/factory.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/factory.go
@@ -10,6 +10,8 @@ import (
"sync"
"time"
+ "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline"
+ "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/agent"
"github.com/DataDog/datadog-agent/pkg/trace/telemetry"
@@ -35,10 +37,16 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata"
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata"
- "github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry"
)
+var logsAgentExporterFeatureGate = featuregate.GlobalRegistry().MustRegister(
+ "exporter.datadogexporter.UseLogsAgentExporter",
+ featuregate.StageAlpha,
+ featuregate.WithRegisterDescription("When enabled, datadogexporter uses the Datadog agent logs pipeline for exporting logs."),
+ featuregate.WithRegisterFromVersion("v0.100.0"),
+)
+
var metricExportNativeClientFeatureGate = featuregate.GlobalRegistry().MustRegister(
"exporter.datadogexporter.metricexportnativeclient",
featuregate.StageBeta,
@@ -57,6 +65,10 @@ func isMetricExportV2Enabled() bool {
return metricExportNativeClientFeatureGate.IsEnabled()
}
+func isLogsAgentExporterEnabled() bool {
+ return logsAgentExporterFeatureGate.IsEnabled()
+}
+
// enableNativeMetricExport switches metric export to call native Datadog APIs instead of Zorkian APIs.
func enableNativeMetricExport() error {
return featuregate.GlobalRegistry().Set(metricExportNativeClientFeatureGate.ID(), true)
@@ -91,8 +103,6 @@ type factory struct {
attributesTranslator *attributes.Translator
attributesErr error
- wg sync.WaitGroup // waits for agent to exit
-
registry *featuregate.Registry
}
@@ -138,14 +148,14 @@ func (f *factory) StopReporter() {
})
}
-func (f *factory) TraceAgent(ctx context.Context, params exporter.CreateSettings, cfg *Config, sourceProvider source.Provider, attrsTranslator *attributes.Translator) (*agent.Agent, error) {
- agnt, err := newTraceAgent(ctx, params, cfg, sourceProvider, datadog.InitializeMetricClient(params.MeterProvider, datadog.ExporterSourceTag), attrsTranslator)
+func (f *factory) TraceAgent(ctx context.Context, wg *sync.WaitGroup, params exporter.CreateSettings, cfg *Config, sourceProvider source.Provider, attrsTranslator *attributes.Translator) (*agent.Agent, error) {
+ agnt, err := newTraceAgent(ctx, params, cfg, sourceProvider, metricsclient.InitializeMetricClient(params.MeterProvider, metricsclient.ExporterSourceTag), attrsTranslator)
if err != nil {
return nil, err
}
- f.wg.Add(1)
+ wg.Add(1)
go func() {
- defer f.wg.Done()
+ defer wg.Done()
agnt.Run()
}()
return agnt, nil
@@ -218,6 +228,9 @@ func (f *factory) createDefaultConfig() component.Config {
TCPAddrConfig: confignet.TCPAddrConfig{
Endpoint: "https://http-intake.logs.datadoghq.com",
},
+ UseCompression: true,
+ CompressionLevel: 6,
+ BatchWait: 5,
},
HostMetadata: HostMetadataConfig{
@@ -238,11 +251,11 @@ func checkAndCastConfig(c component.Config, logger *zap.Logger) *Config {
return cfg
}
-func (f *factory) consumeStatsPayload(ctx context.Context, statsIn <-chan []byte, statsToAgent chan<- *pb.StatsPayload, tracerVersion string, agentVersion string, logger *zap.Logger) {
+func (f *factory) consumeStatsPayload(ctx context.Context, wg *sync.WaitGroup, statsIn <-chan []byte, statsToAgent chan<- *pb.StatsPayload, tracerVersion string, agentVersion string, logger *zap.Logger) {
for i := 0; i < runtime.NumCPU(); i++ {
- f.wg.Add(1)
+ wg.Add(1)
go func() {
- defer f.wg.Done()
+ defer wg.Done()
for {
select {
case <-ctx.Done():
@@ -290,14 +303,18 @@ func (f *factory) createMetricsExporter(
return nil, fmt.Errorf("failed to build attributes translator: %w", err)
}
- var pushMetricsFn consumer.ConsumeMetricsFunc
+ var (
+ pushMetricsFn consumer.ConsumeMetricsFunc
+ wg sync.WaitGroup // waits for consumeStatsPayload to exit
+ )
+
acfg, err := newTraceAgentConfig(ctx, set, cfg, hostProvider, attrsTranslator)
if err != nil {
cancel()
return nil, err
}
statsToAgent := make(chan *pb.StatsPayload)
- metricsClient := datadog.InitializeMetricClient(set.MeterProvider, datadog.ExporterSourceTag)
+ metricsClient := metricsclient.InitializeMetricClient(set.MeterProvider, metricsclient.ExporterSourceTag)
timingReporter := timing.New(metricsClient)
statsWriter := writer.NewStatsWriter(acfg, statsToAgent, telemetry.NewNoopCollector(), metricsClient, timingReporter)
@@ -306,7 +323,7 @@ func (f *factory) createMetricsExporter(
statsIn := make(chan []byte, 1000)
statsv := set.BuildInfo.Command + set.BuildInfo.Version
- f.consumeStatsPayload(ctx, statsIn, statsToAgent, statsv, acfg.AgentVersion, set.Logger)
+ f.consumeStatsPayload(ctx, &wg, statsIn, statsToAgent, statsv, acfg.AgentVersion, set.Logger)
pcfg := newMetadataConfigfromConfig(cfg)
metadataReporter, err := f.Reporter(set, pcfg)
if err != nil {
@@ -333,10 +350,10 @@ func (f *factory) createMetricsExporter(
return nil
}
} else {
- exp, metricsErr := newMetricsExporter(ctx, set, cfg, acfg, &f.onceMetadata, attrsTranslator, hostProvider, statsToAgent, metadataReporter, statsIn)
+ exp, metricsErr := newMetricsExporter(ctx, set, cfg, acfg, &f.onceMetadata, attrsTranslator, hostProvider, metadataReporter, statsIn)
if metricsErr != nil {
- cancel() // first cancel context
- f.wg.Wait() // then wait for shutdown
+ cancel() // first cancel context
+ wg.Wait() // then wait for shutdown
return nil, metricsErr
}
pushMetricsFn = exp.PushMetricsDataScrubbed
@@ -355,8 +372,8 @@ func (f *factory) createMetricsExporter(
exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}),
exporterhelper.WithQueue(cfg.QueueSettings),
exporterhelper.WithShutdown(func(context.Context) error {
- cancel() // first cancel context
- f.wg.Wait() // then wait for shutdown
+ cancel() // first cancel context
+ wg.Wait() // then wait for shutdown
f.StopReporter()
statsWriter.Stop()
if statsIn != nil {
@@ -393,6 +410,7 @@ func (f *factory) createTracesExporter(
var (
pusher consumer.ConsumeTracesFunc
stop component.ShutdownFunc
+ wg sync.WaitGroup // waits for agent to exit
)
hostProvider, err := f.SourceProvider(set.TelemetrySettings, cfg.Hostname)
@@ -408,7 +426,7 @@ func (f *factory) createTracesExporter(
return nil, fmt.Errorf("failed to build attributes translator: %w", err)
}
- traceagent, err := f.TraceAgent(ctx, set, cfg, hostProvider, attrsTranslator)
+ traceagent, err := f.TraceAgent(ctx, &wg, set, cfg, hostProvider, attrsTranslator)
if err != nil {
cancel()
return nil, fmt.Errorf("failed to start trace-agent: %w", err)
@@ -447,7 +465,7 @@ func (f *factory) createTracesExporter(
tracex, err2 := newTracesExporter(ctx, set, cfg, &f.onceMetadata, hostProvider, traceagent, metadataReporter)
if err2 != nil {
cancel()
- f.wg.Wait() // then wait for shutdown
+ wg.Wait() // then wait for shutdown
return nil, err2
}
pusher = tracex.consumeTraces
@@ -481,6 +499,7 @@ func (f *factory) createLogsExporter(
cfg := checkAndCastConfig(c, set.TelemetrySettings.Logger)
var pusher consumer.ConsumeLogsFunc
+ var logsAgent logsagentpipeline.LogsAgent
hostProvider, err := f.SourceProvider(set.TelemetrySettings, cfg.Hostname)
if err != nil {
return nil, fmt.Errorf("failed to build hostname provider: %w", err)
@@ -501,7 +520,8 @@ func (f *factory) createLogsExporter(
return nil, fmt.Errorf("failed to build attributes translator: %w", err)
}
- if cfg.OnlyMetadata {
+ switch {
+ case cfg.OnlyMetadata:
// only host metadata needs to be sent, once.
pusher = func(_ context.Context, td plog.Logs) error {
f.onceMetadata.Do(func() {
@@ -514,11 +534,18 @@ func (f *factory) createLogsExporter(
}
return nil
}
- } else {
+ case isLogsAgentExporterEnabled():
+ la, exp, err := newLogsAgentExporter(ctx, set, cfg, hostProvider)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ logsAgent = la
+ pusher = exp.ConsumeLogs
+ default:
exp, err := newLogsExporter(ctx, set, cfg, &f.onceMetadata, attributesTranslator, hostProvider, metadataReporter)
if err != nil {
cancel()
- f.wg.Wait() // then wait for shutdown
return nil, err
}
pusher = exp.consumeLogs
@@ -535,6 +562,9 @@ func (f *factory) createLogsExporter(
exporterhelper.WithShutdown(func(context.Context) error {
cancel()
f.StopReporter()
+ if logsAgent != nil {
+ return logsAgent.Stop(ctx)
+ }
return nil
}),
)
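The logs-agent path above is guarded by the exporter.datadogexporter.UseLogsAgentExporter gate. In a custom build, the gate can be flipped programmatically as sketched below (deployments typically use the collector's --feature-gates flag instead); the blank import is only there so the exporter package's gate registration runs, and the whole snippet is an assumption-laden sketch rather than recommended practice.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/featuregate"

	_ "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter" // registers the gate
)

func main() {
	const id = "exporter.datadogexporter.UseLogsAgentExporter"

	// Set fails if the gate is unknown, i.e. the exporter package was not linked in.
	if err := featuregate.GlobalRegistry().Set(id, true); err != nil {
		panic(err)
	}

	featuregate.GlobalRegistry().VisitAll(func(g *featuregate.Gate) {
		if g.ID() == id {
			fmt.Println(g.ID(), "enabled:", g.IsEnabled())
		}
	})
}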
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata/metadata.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata/metadata.go
index 700f202691..93b868198e 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata/metadata.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/hostmetadata/metadata.go
@@ -7,6 +7,7 @@ package hostmetadata // import "github.com/open-telemetry/opentelemetry-collecto
import (
"bytes"
+ "compress/gzip"
"context"
"encoding/json"
"fmt"
@@ -91,9 +92,27 @@ func fillHostMetadata(params exporter.CreateSettings, pcfg PusherConfig, p sourc
func (p *pusher) pushMetadata(hm payload.HostMetadata) error {
path := p.pcfg.MetricsEndpoint + "/intake"
- buf, _ := json.Marshal(hm)
- req, _ := http.NewRequest(http.MethodPost, path, bytes.NewBuffer(buf))
+ marshaled, err := json.Marshal(hm)
+ if err != nil {
+ return fmt.Errorf("error marshaling metadata payload: %w", err)
+ }
+
+ var buf bytes.Buffer
+ g := gzip.NewWriter(&buf)
+ if _, err = g.Write(marshaled); err != nil {
+ return fmt.Errorf("error compressing metadata payload: %w", err)
+ }
+ if err = g.Close(); err != nil {
+ return fmt.Errorf("error closing gzip writer: %w", err)
+ }
+
+ req, err := http.NewRequest(http.MethodPost, path, &buf)
+ if err != nil {
+ return fmt.Errorf("error creating metadata request: %w", err)
+ }
+
clientutil.SetDDHeaders(req.Header, p.params.BuildInfo, p.pcfg.APIKey)
+ // Set the content type to JSON and the content encoding to gzip
clientutil.SetExtraHeaders(req.Header, clientutil.JSONHeaders)
resp, err := p.httpClient.Do(req)
@@ -163,7 +182,7 @@ func RunPusher(ctx context.Context, params exporter.CreateSettings, pcfg PusherC
// All fields that are being filled in by our exporter
// do not change over time. If this ever changes `hostMetadata`
// *must* be deep copied before calling `fillHostMetadata`.
- hostMetadata := payload.HostMetadata{Meta: &payload.Meta{}, Tags: &payload.HostTags{}}
+ hostMetadata := payload.NewEmpty()
if pcfg.UseResourceMetadata {
hostMetadata = metadataFromAttributes(attrs)
}
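The metadata pusher now gzips the marshaled payload before the POST and surfaces marshal/compress errors instead of discarding them. A self-contained sketch of the same marshal, gzip, and request-building sequence follows, with a placeholder endpoint and payload; the real code additionally sets Datadog-specific headers via clientutil.

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"net/http"
)

// postGzippedJSON builds an HTTP request whose body is the gzip-compressed
// JSON encoding of payload. Endpoint and payload are placeholders.
func postGzippedJSON(url string, payload any) (*http.Request, error) {
	marshaled, err := json.Marshal(payload)
	if err != nil {
		return nil, fmt.Errorf("marshal: %w", err)
	}

	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	if _, err := gz.Write(marshaled); err != nil {
		return nil, fmt.Errorf("compress: %w", err)
	}
	if err := gz.Close(); err != nil {
		return nil, fmt.Errorf("close gzip writer: %w", err)
	}

	req, err := http.NewRequest(http.MethodPost, url, &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Content-Encoding", "gzip")
	return req, nil
}

func main() {
	req, err := postGzippedJSON("https://example.com/intake", map[string]string{"hostname": "demo"})
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL, req.Header.Get("Content-Encoding"))
}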
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/logs/hostnameimpl.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/logs/hostnameimpl.go
new file mode 100644
index 0000000000..e801dc57e5
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/logs/hostnameimpl.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package logs // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/logs"
+
+import (
+ "context"
+
+ "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface"
+ "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source"
+)
+
+type service struct {
+ provider source.Provider
+}
+
+var _ hostnameinterface.Component = (*service)(nil)
+
+// Get returns the hostname.
+func (hs *service) Get(ctx context.Context) (string, error) {
+ src, err := hs.provider.Source(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ hostname := ""
+ if src.Kind == source.HostnameKind {
+ hostname = src.Identifier
+ }
+
+ return hostname, nil
+}
+
+// GetSafe returns the hostname, or 'unknown host' if anything goes wrong.
+func (hs *service) GetSafe(ctx context.Context) string {
+ name, err := hs.Get(ctx)
+ if err != nil {
+ return "unknown host"
+ }
+ return name
+}
+
+// GetWithProvider returns the hostname for the Agent and the provider that was used to retrieve it.
+func (hs *service) GetWithProvider(ctx context.Context) (hostnameinterface.Data, error) {
+ name, err := hs.Get(ctx)
+ if err != nil {
+ return hostnameinterface.Data{}, err
+ }
+
+ return hostnameinterface.Data{
+ Hostname: name,
+ Provider: "",
+ }, nil
+}
+
+// NewHostnameService creates a new instance of the component hostname
+func NewHostnameService(provider source.Provider) hostnameinterface.Component {
+ return &service{
+ provider: provider,
+ }
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata/generated_status.go
index 0aaf599900..8b68b19b68 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -17,11 +15,3 @@ const (
TracesStability = component.StabilityLevelBeta
MetricsStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/datadog")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/datadog")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..6f07d78230
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/datadog")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/datadog")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer.go
index 0f7ad4602b..ae00f72370 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer.go
@@ -6,7 +6,6 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"context"
- pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-api-client-go/v2/api/datadog"
"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics"
@@ -19,14 +18,12 @@ import (
var _ metrics.Consumer = (*Consumer)(nil)
var _ metrics.HostConsumer = (*Consumer)(nil)
var _ metrics.TagsConsumer = (*Consumer)(nil)
-var _ metrics.APMStatsConsumer = (*Consumer)(nil)
// Consumer implements metrics.Consumer. It records consumed metrics, sketches and
// APM stats payloads. It provides them to the caller using the All method.
type Consumer struct {
ms []datadogV2.MetricSeries
sl sketches.SketchSeriesList
- as []*pb.ClientStatsPayload
seenHosts map[string]struct{}
seenTags map[string]struct{}
}
@@ -80,11 +77,11 @@ func (c *Consumer) runningMetrics(timestamp uint64, buildInfo component.BuildInf
}
// All gets all metrics (consumed metrics and running metrics).
-func (c *Consumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []string, metadata metrics.Metadata) ([]datadogV2.MetricSeries, sketches.SketchSeriesList, []*pb.ClientStatsPayload) {
+func (c *Consumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []string, metadata metrics.Metadata) ([]datadogV2.MetricSeries, sketches.SketchSeriesList) {
series := c.ms
series = append(series, c.runningMetrics(timestamp, buildInfo, metadata)...)
if len(tags) == 0 {
- return series, c.sl, c.as
+ return series, c.sl
}
for i := range series {
series[i].Tags = append(series[i].Tags, tags...)
@@ -92,15 +89,7 @@ func (c *Consumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []s
for i := range c.sl {
c.sl[i].Tags = append(c.sl[i].Tags, tags...)
}
- for i := range c.as {
- c.as[i].Tags = append(c.as[i].Tags, tags...)
- }
- return series, c.sl, c.as
-}
-
-// ConsumeAPMStats implements metrics.APMStatsConsumer.
-func (c *Consumer) ConsumeAPMStats(s *pb.ClientStatsPayload) {
- c.as = append(c.as, s)
+ return series, c.sl
}
// ConsumeTimeSeries implements the metrics.Consumer interface.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer_deprecated.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer_deprecated.go
index a22db4b067..c47c2acda3 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer_deprecated.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metrics/consumer_deprecated.go
@@ -6,7 +6,6 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"context"
- pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics"
"github.com/DataDog/opentelemetry-mapping-go/pkg/quantile"
"go.opentelemetry.io/collector/component"
@@ -18,14 +17,12 @@ import (
var _ metrics.Consumer = (*ZorkianConsumer)(nil)
var _ metrics.HostConsumer = (*ZorkianConsumer)(nil)
var _ metrics.TagsConsumer = (*ZorkianConsumer)(nil)
-var _ metrics.APMStatsConsumer = (*ZorkianConsumer)(nil)
// ZorkianConsumer implements metrics.Consumer. It records consumed metrics, sketches and
// APM stats payloads. It provides them to the caller using the All method.
type ZorkianConsumer struct {
ms []zorkian.Metric
sl sketches.SketchSeriesList
- as []*pb.ClientStatsPayload
seenHosts map[string]struct{}
seenTags map[string]struct{}
}
@@ -72,11 +69,11 @@ func (c *ZorkianConsumer) runningMetrics(timestamp uint64, buildInfo component.B
}
// All gets all metrics (consumed metrics and running metrics).
-func (c *ZorkianConsumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []string) ([]zorkian.Metric, sketches.SketchSeriesList, []*pb.ClientStatsPayload) {
+func (c *ZorkianConsumer) All(timestamp uint64, buildInfo component.BuildInfo, tags []string) ([]zorkian.Metric, sketches.SketchSeriesList) {
series := c.ms
series = append(series, c.runningMetrics(timestamp, buildInfo)...)
if len(tags) == 0 {
- return series, c.sl, c.as
+ return series, c.sl
}
for i := range series {
series[i].Tags = append(series[i].Tags, tags...)
@@ -84,15 +81,7 @@ func (c *ZorkianConsumer) All(timestamp uint64, buildInfo component.BuildInfo, t
for i := range c.sl {
c.sl[i].Tags = append(c.sl[i].Tags, tags...)
}
- for i := range c.as {
- c.as[i].Tags = append(c.as[i].Tags, tags...)
- }
- return series, c.sl, c.as
-}
-
-// ConsumeAPMStats implements metrics.APMStatsConsumer.
-func (c *ZorkianConsumer) ConsumeAPMStats(s *pb.ClientStatsPayload) {
- c.as = append(c.as, s)
+ return series, c.sl
}
// ConsumeTimeSeries implements the metrics.Consumer interface.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/logs_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/logs_exporter.go
index 37d0a52b16..d54d8f1a06 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/logs_exporter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/logs_exporter.go
@@ -8,6 +8,9 @@ import (
"fmt"
"sync"
+ "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline"
+ "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl"
+ "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter"
"github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata"
"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source"
@@ -23,9 +26,12 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/scrub"
)
-// otelSource specifies a source to be added to all logs sent from the Datadog exporter
-// The tag has key `otel_source` and the value specified on this constant.
-const otelSource = "datadog_exporter"
+const (
+ // logSourceName specifies the Datadog source tag value to be added to logs sent from the Datadog exporter.
+ logSourceName = "otlp_log_ingestion"
+ // otelSource specifies a source to be added to all logs sent from the Datadog exporter. The tag has key `otel_source` and the value specified on this constant.
+ otelSource = "datadog_exporter"
+)
type logsExporter struct {
params exporter.CreateSettings
@@ -114,3 +120,34 @@ func (exp *logsExporter) consumeLogs(ctx context.Context, ld plog.Logs) (err err
payloads := exp.translator.MapLogs(ctx, ld)
return exp.sender.SubmitLogs(exp.ctx, payloads)
}
+
+// newLogsAgentExporter creates new instances of the logs agent and the logs agent exporter
+func newLogsAgentExporter(
+ ctx context.Context,
+ params exporter.CreateSettings,
+ cfg *Config,
+ sourceProvider source.Provider,
+) (logsagentpipeline.LogsAgent, exporter.Logs, error) {
+ logComponent := newLogComponent(params.TelemetrySettings)
+ cfgComponent := newConfigComponent(params.TelemetrySettings, cfg)
+ logsAgentConfig := &logsagentexporter.Config{
+ OtelSource: otelSource,
+ LogSourceName: logSourceName,
+ }
+ hostnameComponent := logs.NewHostnameService(sourceProvider)
+ logsAgent := logsagentpipelineimpl.NewLogsAgent(logsagentpipelineimpl.Dependencies{
+ Log: logComponent,
+ Config: cfgComponent,
+ Hostname: hostnameComponent,
+ })
+ err := logsAgent.Start(ctx)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to create logs agent: %w", err)
+ }
+ pipelineChan := logsAgent.GetPipelineProvider().NextPipelineChan()
+ logsAgentExporter, err := logsagentexporter.NewFactory(pipelineChan).CreateLogsExporter(ctx, params, logsAgentConfig)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to create logs agent exporter: %w", err)
+ }
+ return logsAgent, logsAgentExporter, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metadata.yaml
index eb1992ca14..912ec16b09 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metadata.yaml
@@ -8,7 +8,7 @@ status:
beta: [traces, metrics]
distributions: [contrib]
codeowners:
- active: [mx-psi, dineshg13, liustanley, songy23, mackjmr]
+ active: [mx-psi, dineshg13, liustanley, songy23, mackjmr, ankitpatel96]
emeritus: [gbbr]
tests:
@@ -21,4 +21,7 @@ tests:
enabled: false
retry_on_failure:
enabled: false
- expect_consumer_error: true
\ No newline at end of file
+ expect_consumer_error: true
+ goleak:
+ setup: "setupTestMain(m)"
+ skip: true
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metrics_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metrics_exporter.go
index 94709256d1..27dc783e38 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metrics_exporter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/metrics_exporter.go
@@ -12,7 +12,6 @@ import (
"sync"
"time"
- pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/config"
"github.com/DataDog/datadog-api-client-go/v2/api/datadogV2"
"github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata"
@@ -48,8 +47,7 @@ type metricsExporter struct {
metadataReporter *inframetadata.Reporter
// getPushTime returns a Unix time in nanoseconds, representing the time pushing metrics.
// It will be overwritten in tests.
- getPushTime func() uint64
- statsToAgent chan<- *pb.StatsPayload
+ getPushTime func() uint64
}
// translatorFromConfig creates a new metrics translator from the exporter
@@ -97,7 +95,6 @@ func newMetricsExporter(
onceMetadata *sync.Once,
attrsTranslator *attributes.Translator,
sourceProvider source.Provider,
- statsToAgent chan<- *pb.StatsPayload,
metadataReporter *inframetadata.Reporter,
statsOut chan []byte,
) (*metricsExporter, error) {
@@ -118,7 +115,6 @@ func newMetricsExporter(
onceMetadata: onceMetadata,
sourceProvider: sourceProvider,
getPushTime: func() uint64 { return uint64(time.Now().UTC().UnixNano()) },
- statsToAgent: statsToAgent,
metadataReporter: metadataReporter,
}
errchan := make(chan error)
@@ -222,11 +218,10 @@ func (exp *metricsExporter) PushMetricsData(ctx context.Context, md pmetric.Metr
}
var sl sketches.SketchSeriesList
- var sp []*pb.ClientStatsPayload
var errs []error
if isMetricExportV2Enabled() {
var ms []datadogV2.MetricSeries
- ms, sl, sp = consumer.(*metrics.Consumer).All(exp.getPushTime(), exp.params.BuildInfo, tags, metadata)
+ ms, sl = consumer.(*metrics.Consumer).All(exp.getPushTime(), exp.params.BuildInfo, tags, metadata)
if len(ms) > 0 {
exp.params.Logger.Debug("exporting native Datadog payload", zap.Any("metric", ms))
_, experr := exp.retrier.DoWithRetries(ctx, func(context.Context) error {
@@ -238,7 +233,7 @@ func (exp *metricsExporter) PushMetricsData(ctx context.Context, md pmetric.Metr
}
} else {
var ms []zorkian.Metric
- ms, sl, sp = consumer.(*metrics.ZorkianConsumer).All(exp.getPushTime(), exp.params.BuildInfo, tags)
+ ms, sl = consumer.(*metrics.ZorkianConsumer).All(exp.getPushTime(), exp.params.BuildInfo, tags)
if len(ms) > 0 {
exp.params.Logger.Debug("exporting Zorkian Datadog payload", zap.Any("metric", ms))
_, experr := exp.retrier.DoWithRetries(ctx, func(context.Context) error {
@@ -256,23 +251,5 @@ func (exp *metricsExporter) PushMetricsData(ctx context.Context, md pmetric.Metr
errs = append(errs, experr)
}
- if len(sp) > 0 {
- exp.params.Logger.Debug("exporting APM stats payloads", zap.Any("stats_payloads", sp))
- statsv := exp.params.BuildInfo.Command + exp.params.BuildInfo.Version
- for _, csp := range sp {
- if csp.TracerVersion == "" {
- csp.TracerVersion = statsv
- }
- }
- exp.statsToAgent <- &pb.StatsPayload{
- AgentHostname: exp.agntConfig.Hostname, // This is "dead-code". We will be removing this code path entirely
- AgentEnv: exp.agntConfig.DefaultEnv,
- Stats: sp,
- AgentVersion: exp.agntConfig.AgentVersion,
- ClientComputed: false,
- SplitPayload: false,
- }
- }
-
return errors.Join(errs...)
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/traces_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/traces_exporter.go
index 05088b2f3d..640e22b0b1 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/traces_exporter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/traces_exporter.go
@@ -208,7 +208,6 @@ func newTraceAgentConfig(ctx context.Context, params exporter.CreateSettings, cf
acfg.AgentVersion = fmt.Sprintf("datadogexporter-%s-%s", params.BuildInfo.Command, params.BuildInfo.Version)
acfg.SkipSSLValidation = cfg.ClientConfig.TLSSetting.InsecureSkipVerify
acfg.ComputeStatsBySpanKind = cfg.Traces.ComputeStatsBySpanKind
- acfg.PeerServiceAggregation = cfg.Traces.PeerServiceAggregation
acfg.PeerTagsAggregation = cfg.Traces.PeerTagsAggregation
acfg.PeerTags = cfg.Traces.PeerTags
if v := cfg.Traces.flushInterval; v > 0 {
@@ -220,6 +219,9 @@ func newTraceAgentConfig(ctx context.Context, params exporter.CreateSettings, cf
if addr := cfg.Traces.Endpoint; addr != "" {
acfg.Endpoints[0].Host = addr
}
+ if cfg.Traces.ComputeTopLevelBySpanKind {
+ acfg.Features["enable_otlp_compute_top_level_by_span_kind"] = struct{}{}
+ }
tracelog.SetLogger(&zaplogger{params.Logger}) //TODO: This shouldn't be a singleton
return acfg, nil
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/README.md
index fb4a31590e..ec550717b6 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/README.md
@@ -26,7 +26,19 @@ Exporter supports the following features:
Please note that there is no guarantee that exact field names will remain stable.
The official [opentelemetry-collector-contrib container](https://hub.docker.com/r/otel/opentelemetry-collector-contrib/tags#!) does not have a writable filesystem by default since it's built on the `scratch` layer.
-As such, you will need to create a writable directory for the path, potentially by mounting writable volumes or creating a custom image.
+As such, you will need to create a writable directory for the path. You could do this by [mounting a volume](https://docs.docker.com/storage/volumes/#choose-the--v-or---mount-flag) with flags such as `rw` or `rwZ`.
+
+On Linux, and given an `otel-collector-config.yaml` with a `file` exporter whose path is prefixed with `/file-exporter`,
+```bash
+# Linux needs +x to list a directory. You can use a+ instead of o+ for the mode if you want to ensure your user and group have access.
+mkdir --mode o+rwx file-exporter
+# z is an SELinux construct that is ignored on other systems
+docker run -v "./file-exporter:/file-exporter:rwz" -v "otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml" otel/opentelemetry-collector-contrib:latest
+```
+Note this same syntax for volumes will work with docker-compose.
+
+You could also modify the base image and manually build your own container to have a writable directory or change the run-as UID if needed, but this is more involved.
+
## Configuration options:
The following settings are required:
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/internal/metadata/generated_status.go
index aa9a33eaeb..4a61e902bd 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -17,11 +15,3 @@ const (
MetricsStability = component.StabilityLevelAlpha
LogsStability = component.StabilityLevelAlpha
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/file")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/file")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..fee9287ffc
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/file")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/file")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/metadata.yaml
index fd61213f8b..f3a1ff0314 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter/metadata.yaml
@@ -12,4 +12,9 @@ status:
tests:
config:
path: testdata/log.json
- expect_consumer_error: true
\ No newline at end of file
+ expect_consumer_error: true
+ goleak:
+ ignore:
+ top:
+ # Existing issue for leak: https://github.com/natefinch/lumberjack/issues/56
+ - "gopkg.in/natefinch/lumberjack%2ev2.(*Logger).millRun"
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md
index 4788c7831b..d5ac964419 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md
@@ -25,6 +25,7 @@ The following settings can be optionally configured:
- `resolve_canonical_bootstrap_servers_only` (default = false): Whether to resolve then reverse-lookup broker IPs during startup.
- `client_id` (default = "sarama"): The client ID to configure the Sarama Kafka client with. The client ID will be used for all produce requests.
- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to export to.
+- `topic_from_attribute` (default = ""): Specify the resource attribute whose value should be used as the message's topic. This option, when set, will take precedence over the default topic. If `topic_from_attribute` is not set, the message's topic will be set to the value of the configuration option `topic` instead.
- `encoding` (default = otlp_proto): The encoding of the traces sent to kafka. All available encodings:
- `otlp_proto`: payload is Protobuf serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs.
- `otlp_json`: payload is JSON serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs.
@@ -36,6 +37,7 @@ The following settings can be optionally configured:
- The following encodings are valid *only* for **logs**.
- `raw`: if the log record body is a byte array, it is sent as is. Otherwise, it is serialized to JSON. Resource and record attributes are discarded.
- `partition_traces_by_id` (default = false): configures the exporter to include the trace ID as the message key in trace messages sent to kafka. *Please note:* this setting does not have any effect on Jaeger encoding exporters since Jaeger exporters include trace ID as the message key by default.
+- `partition_metrics_by_resource_attributes` (default = false): configures the exporter to include the hash of sorted resource attributes as the message partitioning key in metric messages sent to kafka.
- `auth`
- `plain_text`
- `username`: The username to use.
@@ -47,7 +49,7 @@ The following settings can be optionally configured:
- `version` (default = 0): The SASL protocol version to use (0 or 1)
- `aws_msk.region`: AWS Region in case of AWS_MSK_IAM mechanism
- `aws_msk.broker_addr`: MSK Broker address in case of AWS_MSK_IAM mechanism
- - `tls`
+ - `tls`: see [TLS Configuration Settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) for the full set of available options.
- `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should
only be used if `insecure` is set to false.
- `cert_file`: path to the TLS cert to use for TLS required connections. Should
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go
index daa387bfb9..d048b27a88 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go
@@ -40,6 +40,9 @@ type Config struct {
// The name of the kafka topic to export to (default otlp_spans for traces, otlp_metrics for metrics)
Topic string `mapstructure:"topic"`
+ // TopicFromAttribute is the name of the attribute to use as the topic name.
+ TopicFromAttribute string `mapstructure:"topic_from_attribute"`
+
// Encoding of messages (default "otlp_proto")
Encoding string `mapstructure:"encoding"`
@@ -48,6 +51,8 @@ type Config struct {
// trace ID as the message key by default.
PartitionTracesByID bool `mapstructure:"partition_traces_by_id"`
+ PartitionMetricsByResourceAttributes bool `mapstructure:"partition_metrics_by_resource_attributes"`
+
// Metadata is the namespace for metadata management properties used by the
// Client, and shared by the Producer/Consumer.
Metadata Metadata `mapstructure:"metadata"`
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go
index e822b0a005..d990a17dab 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go
@@ -38,6 +38,8 @@ const (
defaultCompression = "none"
// default from sarama.NewConfig()
defaultFluxMaxMessages = 0
+ // partitioning metrics by resource attributes is disabled by default
+ defaultPartitionMetricsByResourceAttributesEnabled = false
)
// FactoryOption applies changes to kafkaExporterFactory.
@@ -97,8 +99,9 @@ func createDefaultConfig() component.Config {
Brokers: []string{defaultBroker},
ClientID: defaultClientID,
// using an empty topic to track when it has not been set by user, default is based on traces or metrics.
- Topic: "",
- Encoding: defaultEncoding,
+ Topic: "",
+ Encoding: defaultEncoding,
+ PartitionMetricsByResourceAttributes: defaultPartitionMetricsByResourceAttributesEnabled,
Metadata: Metadata{
Full: defaultMetadataFull,
Retry: MetadataRetry{
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go
index b8d45d135f..63e9bce921 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -17,11 +15,3 @@ const (
MetricsStability = component.StabilityLevelBeta
LogsStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/kafka")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/kafka")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..5f4488d926
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/kafka")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/kafka")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go
index c98abdc674..59fc26e647 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go
@@ -12,6 +12,7 @@ import (
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer/consumererror"
"go.opentelemetry.io/collector/exporter"
+ "go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
@@ -26,7 +27,6 @@ var errUnrecognizedEncoding = fmt.Errorf("unrecognized encoding")
type kafkaTracesProducer struct {
cfg Config
producer sarama.SyncProducer
- topic string
marshaler TracesMarshaler
logger *zap.Logger
}
@@ -41,7 +41,7 @@ func (ke kafkaErrors) Error() string {
}
func (e *kafkaTracesProducer) tracesPusher(_ context.Context, td ptrace.Traces) error {
- messages, err := e.marshaler.Marshal(td, e.topic)
+ messages, err := e.marshaler.Marshal(td, getTopic(&e.cfg, td.ResourceSpans()))
if err != nil {
return consumererror.NewPermanent(err)
}
@@ -78,13 +78,12 @@ func (e *kafkaTracesProducer) start(_ context.Context, _ component.Host) error {
type kafkaMetricsProducer struct {
cfg Config
producer sarama.SyncProducer
- topic string
marshaler MetricsMarshaler
logger *zap.Logger
}
func (e *kafkaMetricsProducer) metricsDataPusher(_ context.Context, md pmetric.Metrics) error {
- messages, err := e.marshaler.Marshal(md, e.topic)
+ messages, err := e.marshaler.Marshal(md, getTopic(&e.cfg, md.ResourceMetrics()))
if err != nil {
return consumererror.NewPermanent(err)
}
@@ -121,13 +120,12 @@ func (e *kafkaMetricsProducer) start(_ context.Context, _ component.Host) error
type kafkaLogsProducer struct {
cfg Config
producer sarama.SyncProducer
- topic string
marshaler LogsMarshaler
logger *zap.Logger
}
func (e *kafkaLogsProducer) logsDataPusher(_ context.Context, ld plog.Logs) error {
- messages, err := e.marshaler.Marshal(ld, e.topic)
+ messages, err := e.marshaler.Marshal(ld, getTopic(&e.cfg, ld.ResourceLogs()))
if err != nil {
return consumererror.NewPermanent(err)
}
@@ -211,9 +209,14 @@ func newMetricsExporter(config Config, set exporter.CreateSettings, marshalers m
if marshaler == nil {
return nil, errUnrecognizedEncoding
}
+ if config.PartitionMetricsByResourceAttributes {
+ if keyableMarshaler, ok := marshaler.(KeyableMetricsMarshaler); ok {
+ keyableMarshaler.Key()
+ }
+ }
+
return &kafkaMetricsProducer{
cfg: config,
- topic: config.Topic,
marshaler: marshaler,
logger: set.Logger,
}, nil
@@ -234,7 +237,6 @@ func newTracesExporter(config Config, set exporter.CreateSettings, marshalers ma
return &kafkaTracesProducer{
cfg: config,
- topic: config.Topic,
marshaler: marshaler,
logger: set.Logger,
}, nil
@@ -248,9 +250,30 @@ func newLogsExporter(config Config, set exporter.CreateSettings, marshalers map[
return &kafkaLogsProducer{
cfg: config,
- topic: config.Topic,
marshaler: marshaler,
logger: set.Logger,
}, nil
}
+
+type resourceSlice[T any] interface {
+ Len() int
+ At(int) T
+}
+
+type resource interface {
+ Resource() pcommon.Resource
+}
+
+func getTopic[T resource](cfg *Config, resources resourceSlice[T]) string {
+ if cfg.TopicFromAttribute == "" {
+ return cfg.Topic
+ }
+ for i := 0; i < resources.Len(); i++ {
+ rv, ok := resources.At(i).Resource().Attributes().Get(cfg.TopicFromAttribute)
+ if ok && rv.Str() != "" {
+ return rv.Str()
+ }
+ }
+ return cfg.Topic
+}
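For reference, the topic-resolution behaviour added here (the first non-empty value of the configured resource attribute wins, otherwise the static `topic` is used) can be sketched outside the exporter as follows; the attribute name and topic names are made up for illustration:

```go
// Standalone sketch of the getTopic logic above, not the exporter's actual code path.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func resolveTopic(defaultTopic, topicFromAttribute string, md pmetric.Metrics) string {
	if topicFromAttribute == "" {
		return defaultTopic
	}
	rms := md.ResourceMetrics()
	for i := 0; i < rms.Len(); i++ {
		// First resource carrying a non-empty value for the attribute decides the topic.
		if v, ok := rms.At(i).Resource().Attributes().Get(topicFromAttribute); ok && v.Str() != "" {
			return v.Str()
		}
	}
	return defaultTopic
}

func main() {
	md := pmetric.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().PutStr("kafka_topic", "team-a-metrics") // hypothetical attribute

	fmt.Println(resolveTopic("otlp_metrics", "kafka_topic", md)) // team-a-metrics
	fmt.Println(resolveTopic("otlp_metrics", "", md))            // otlp_metrics
}
```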
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go
index d9e38dd52c..3429cdd831 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go
@@ -11,6 +11,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
)
type pdataLogsMarshaler struct {
@@ -42,22 +43,58 @@ func newPdataLogsMarshaler(marshaler plog.Marshaler, encoding string) LogsMarsha
}
}
+// KeyableMetricsMarshaler is an extension of the MetricsMarshaler interface intended to provide partition key capabilities
+// for metrics messages
+type KeyableMetricsMarshaler interface {
+ MetricsMarshaler
+ Key()
+}
+
type pdataMetricsMarshaler struct {
marshaler pmetric.Marshaler
encoding string
+ keyed bool
+}
+
+// Key configures the pdataMetricsMarshaler to set the message key on the kafka messages
+func (p *pdataMetricsMarshaler) Key() {
+ p.keyed = true
}
func (p pdataMetricsMarshaler) Marshal(ld pmetric.Metrics, topic string) ([]*sarama.ProducerMessage, error) {
- bts, err := p.marshaler.MarshalMetrics(ld)
- if err != nil {
- return nil, err
- }
- return []*sarama.ProducerMessage{
- {
+ var msgs []*sarama.ProducerMessage
+ if p.keyed {
+ metrics := ld.ResourceMetrics()
+
+ for i := 0; i < metrics.Len(); i++ {
+ resourceMetrics := metrics.At(i)
+ var hash = pdatautil.MapHash(resourceMetrics.Resource().Attributes())
+
+ newMetrics := pmetric.NewMetrics()
+ resourceMetrics.CopyTo(newMetrics.ResourceMetrics().AppendEmpty())
+
+ bts, err := p.marshaler.MarshalMetrics(newMetrics)
+ if err != nil {
+ return nil, err
+ }
+ msgs = append(msgs, &sarama.ProducerMessage{
+ Topic: topic,
+ Value: sarama.ByteEncoder(bts),
+ Key: sarama.ByteEncoder(hash[:]),
+ })
+ }
+ } else {
+ bts, err := p.marshaler.MarshalMetrics(ld)
+ if err != nil {
+ return nil, err
+ }
+ msgs = append(msgs, &sarama.ProducerMessage{
Topic: topic,
Value: sarama.ByteEncoder(bts),
- },
- }, nil
+ })
+ }
+
+ return msgs, nil
}
func (p pdataMetricsMarshaler) Encoding() string {
@@ -65,13 +102,13 @@ func (p pdataMetricsMarshaler) Encoding() string {
}
func newPdataMetricsMarshaler(marshaler pmetric.Marshaler, encoding string) MetricsMarshaler {
- return pdataMetricsMarshaler{
+ return &pdataMetricsMarshaler{
marshaler: marshaler,
encoding: encoding,
}
}
-// KeyableTracesMarshaler is an extension of the TracesMarshaler interface inteded to provide partition key capabilities
+// KeyableTracesMarshaler is an extension of the TracesMarshaler interface intended to provide partition key capabilities
// for trace messages
type KeyableTracesMarshaler interface {
TracesMarshaler
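The keyed path above produces one Kafka message per `ResourceMetrics`, keyed by a hash of its resource attributes. A rough standalone sketch of that partitioning idea, using an FNV hash as a simplified stand-in for `pdatautil.MapHash` and made-up attribute values:

```go
// Sketch of per-resource partitioning: one payload and one stable key per resource.
package main

import (
	"fmt"
	"hash/fnv"
	"sort"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func resourceKey(attrs pcommon.Map) []byte {
	// Sort the attributes so the key is stable regardless of map iteration order.
	kvs := make([]string, 0, attrs.Len())
	attrs.Range(func(k string, v pcommon.Value) bool {
		kvs = append(kvs, k+"="+v.AsString())
		return true
	})
	sort.Strings(kvs)
	h := fnv.New128a() // simplified stand-in for pdatautil.MapHash
	for _, kv := range kvs {
		h.Write([]byte(kv))
	}
	return h.Sum(nil)
}

func main() {
	md := pmetric.NewMetrics()
	for _, svc := range []string{"checkout", "payments"} { // hypothetical services
		rm := md.ResourceMetrics().AppendEmpty()
		rm.Resource().Attributes().PutStr("service.name", svc)
	}

	// One message (and one key) per resource, mirroring the keyed marshaler path.
	rms := md.ResourceMetrics()
	for i := 0; i < rms.Len(); i++ {
		single := pmetric.NewMetrics()
		rms.At(i).CopyTo(single.ResourceMetrics().AppendEmpty())
		fmt.Printf("message %d key=%x resources=%d\n", i, resourceKey(rms.At(i).Resource().Attributes()), single.ResourceMetrics().Len())
	}
}
```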
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/README.md
index 3f17f084e1..d6bb3bd8b5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/README.md
@@ -55,7 +55,7 @@ The `loadbalancingexporter` will, irrespective of the chosen resolver (`static`,
Refer to [config.yaml](./testdata/config.yaml) for detailed examples on using the processor.
* The `otlp` property configures the template used for building the OTLP exporter. Refer to the OTLP Exporter documentation for information on which options are available. Note that the `endpoint` property should not be set and will be overridden by this exporter with the backend endpoint.
-* The `resolver` accepts a `static` node, a `dns`, a `k8s` service or `awsCloudMap`. If all four are specified, an `errMultipleResolversProvided` error will be thrown.
+* The `resolver` accepts a `static` node, a `dns`, a `k8s` service or `aws_cloud_map`. If all four are specified, an `errMultipleResolversProvided` error will be thrown.
* The `hostname` property inside a `dns` node specifies the hostname to query in order to obtain the list of IP addresses.
* The `dns` node also accepts the following optional properties:
* `hostname` DNS hostname to resolve.
@@ -66,13 +66,13 @@ Refer to [config.yaml](./testdata/config.yaml) for detailed examples on using th
* `service` Kubernetes service to resolve, e.g. `lb-svc.lb-ns`. If no namespace is specified, an attempt will be made to infer the namespace for this collector, and if this fails it will fall back to the `default` namespace.
* `ports` port to be used for exporting the traces to the addresses resolved from `service`. If `ports` is not specified, the default port 4317 is used. When multiple ports are specified, two backends are added to the load balancer as if they were at different pods.
* `timeout` resolver timeout in go-Duration format, e.g. `5s`, `1d`, `30m`. If not specified, `1s` will be used.
-* The `awsCloudMap` node accepts the following properties:
+* The `aws_cloud_map` node accepts the following properties:
 * `namespace` The CloudMap namespace where the service is registered, e.g. `cloudmap`. If no `namespace` is specified, this will fail to start the Load Balancer exporter.
- * `serviceName` The name of the service that you specified when you registered the instance, e.g. `otelcollectors`. If no `serviceName` is specified, this will fail to start the Load Balancer exporter.
+ * `service_name` The name of the service that you specified when you registered the instance, e.g. `otelcollectors`. If no `service_name` is specified, this will fail to start the Load Balancer exporter.
* `interval` resolver interval in go-Duration format, e.g. `5s`, `1d`, `30m`. If not specified, `30s` will be used.
* `timeout` resolver timeout in go-Duration format, e.g. `5s`, `1d`, `30m`. If not specified, `5s` will be used.
 * `port` port to be used for exporting the traces to the addresses resolved from `service`. By default, the port is set in Cloud Map, but can be overridden with a static value in this config
- * `healthStatus` filter in AWS Cloud Map, you can specify the health status of the instances that you want to discover. The healthStatus filter is optional and allows you to query based on the health status of the instances.
+ * `health_status` filter in AWS Cloud Map, you can specify the health status of the instances that you want to discover. The health_status filter is optional and allows you to query based on the health status of the instances.
* Available values are
* `HEALTHY`: Only return instances that are healthy.
* `UNHEALTHY`: Only return instances that are unhealthy.
@@ -192,9 +192,9 @@ exporters:
# except the endpoint
timeout: 3s
resolver:
- awsCloudMap:
+ aws_cloud_map:
namespace: aws-namespace
- serviceName: aws-otel-col-service-name
+ service_name: aws-otel-col-service-name
interval: 30s
service:
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/config.go
index 6acee4a8eb..d3109bc905 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/config.go
@@ -36,7 +36,7 @@ type ResolverSettings struct {
Static *StaticResolver `mapstructure:"static"`
DNS *DNSResolver `mapstructure:"dns"`
K8sSvc *K8sSvcResolver `mapstructure:"k8s"`
- AWSCloudMap *AWSCloudMapResolver `mapstructure:"awsCloudMap"`
+ AWSCloudMap *AWSCloudMapResolver `mapstructure:"aws_cloud_map"`
}
// StaticResolver defines the configuration for the resolver providing a fixed list of backends
@@ -61,8 +61,8 @@ type K8sSvcResolver struct {
type AWSCloudMapResolver struct {
NamespaceName string `mapstructure:"namespace"`
- ServiceName string `mapstructure:"serviceName"`
- HealthStatus types.HealthStatusFilter `mapstructure:"healthStatus"`
+ ServiceName string `mapstructure:"service_name"`
+ HealthStatus types.HealthStatusFilter `mapstructure:"health_status"`
Interval time.Duration `mapstructure:"interval"`
Timeout time.Duration `mapstructure:"timeout"`
Port *uint16 `mapstructure:"port"`
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/helpers.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/helpers.go
index b275ebd52f..13322efb98 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/helpers.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/helpers.go
@@ -10,114 +10,12 @@ import (
// mergeTraces concatenates two ptrace.Traces into a single ptrace.Traces.
func mergeTraces(t1 ptrace.Traces, t2 ptrace.Traces) ptrace.Traces {
- mergedTraces := ptrace.NewTraces()
-
- if t1.SpanCount() == 0 && t2.SpanCount() == 0 {
- return mergedTraces
- }
-
- // Iterate over the first trace and append spans to the merged traces
- for i := 0; i < t1.ResourceSpans().Len(); i++ {
- rs := t1.ResourceSpans().At(i)
- newRS := mergedTraces.ResourceSpans().AppendEmpty()
-
- rs.Resource().MoveTo(newRS.Resource())
- newRS.SetSchemaUrl(rs.SchemaUrl())
-
- for j := 0; j < rs.ScopeSpans().Len(); j++ {
- ils := rs.ScopeSpans().At(j)
-
- newILS := newRS.ScopeSpans().AppendEmpty()
- ils.Scope().MoveTo(newILS.Scope())
- newILS.SetSchemaUrl(ils.SchemaUrl())
-
- for k := 0; k < ils.Spans().Len(); k++ {
- span := ils.Spans().At(k)
- newSpan := newILS.Spans().AppendEmpty()
- span.MoveTo(newSpan)
- }
- }
- }
-
- // Iterate over the second trace and append spans to the merged traces
- for i := 0; i < t2.ResourceSpans().Len(); i++ {
- rs := t2.ResourceSpans().At(i)
- newRS := mergedTraces.ResourceSpans().AppendEmpty()
-
- rs.Resource().MoveTo(newRS.Resource())
- newRS.SetSchemaUrl(rs.SchemaUrl())
-
- for j := 0; j < rs.ScopeSpans().Len(); j++ {
- ils := rs.ScopeSpans().At(j)
-
- newILS := newRS.ScopeSpans().AppendEmpty()
- ils.Scope().MoveTo(newILS.Scope())
- newILS.SetSchemaUrl(ils.SchemaUrl())
-
- for k := 0; k < ils.Spans().Len(); k++ {
- span := ils.Spans().At(k)
- newSpan := newILS.Spans().AppendEmpty()
- span.MoveTo(newSpan)
- }
- }
- }
-
- return mergedTraces
+ t2.ResourceSpans().MoveAndAppendTo(t1.ResourceSpans())
+ return t1
}
// mergeMetrics concatenates two pmetric.Metrics into a single pmetric.Metrics.
func mergeMetrics(m1 pmetric.Metrics, m2 pmetric.Metrics) pmetric.Metrics {
- mergedMetrics := pmetric.NewMetrics()
-
- if m1.MetricCount() == 0 && m2.MetricCount() == 0 {
- return mergedMetrics
- }
-
- // Iterate over the first metric and append metrics to the merged metrics
- for i := 0; i < m1.ResourceMetrics().Len(); i++ {
- rs := m1.ResourceMetrics().At(i)
- newRS := mergedMetrics.ResourceMetrics().AppendEmpty()
-
- rs.Resource().MoveTo(newRS.Resource())
- newRS.SetSchemaUrl(rs.SchemaUrl())
-
- for j := 0; j < rs.ScopeMetrics().Len(); j++ {
- ils := rs.ScopeMetrics().At(j)
-
- newILS := newRS.ScopeMetrics().AppendEmpty()
- ils.Scope().MoveTo(newILS.Scope())
- newILS.SetSchemaUrl(ils.SchemaUrl())
-
- for k := 0; k < ils.Metrics().Len(); k++ {
- metric := ils.Metrics().At(k)
- newMetric := newILS.Metrics().AppendEmpty()
- metric.MoveTo(newMetric)
- }
- }
- }
-
- // Iterate over the second metric and append metrics to the merged metrics
- for i := 0; i < m2.ResourceMetrics().Len(); i++ {
- rs := m2.ResourceMetrics().At(i)
- newRS := mergedMetrics.ResourceMetrics().AppendEmpty()
-
- rs.Resource().MoveTo(newRS.Resource())
- newRS.SetSchemaUrl(rs.SchemaUrl())
-
- for j := 0; j < rs.ScopeMetrics().Len(); j++ {
- ils := rs.ScopeMetrics().At(j)
-
- newILS := newRS.ScopeMetrics().AppendEmpty()
- ils.Scope().MoveTo(newILS.Scope())
- newILS.SetSchemaUrl(ils.SchemaUrl())
-
- for k := 0; k < ils.Metrics().Len(); k++ {
- metric := ils.Metrics().At(k)
- newMetric := newILS.Metrics().AppendEmpty()
- metric.MoveTo(newMetric)
- }
- }
- }
-
- return mergedMetrics
+ m2.ResourceMetrics().MoveAndAppendTo(m1.ResourceMetrics())
+ return m1
}
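The rewritten helpers rely on pdata's `MoveAndAppendTo`, which moves every `ResourceSpans` (or `ResourceMetrics`) from the second batch into the first and leaves the source empty. A small sketch with illustrative span names:

```go
// Sketch of the simplified merge above: move semantics instead of element-by-element copies.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func newTrace(spanName string) ptrace.Traces {
	td := ptrace.NewTraces()
	span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName(spanName)
	return td
}

func main() {
	t1 := newTrace("span-from-batch-1")
	t2 := newTrace("span-from-batch-2")

	// Everything in t2 is moved into t1; t2 is left empty.
	t2.ResourceSpans().MoveAndAppendTo(t1.ResourceSpans())

	fmt.Println("merged span count:", t1.SpanCount())         // 2
	fmt.Println("t2 span count after move:", t2.SpanCount()) // 0
}
```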
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata/generated_status.go
index 111de6b0d3..98cd81971c 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -17,11 +15,3 @@ const (
TracesStability = component.StabilityLevelBeta
LogsStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/loadbalancing")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/loadbalancing")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..e22a8fdda8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/loadbalancing")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/loadbalancing")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/loadbalancer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/loadbalancer.go
index 2f826ad9bc..ea087762ba 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/loadbalancer.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/loadbalancer.go
@@ -92,7 +92,7 @@ func newLoadBalancer(params exporter.CreateSettings, cfg component.Config, facto
}
if oCfg.Resolver.AWSCloudMap != nil {
- awsCloudMapLogger := params.Logger.With(zap.String("resolver", "awsCloudMap"))
+ awsCloudMapLogger := params.Logger.With(zap.String("resolver", "aws_cloud_map"))
var err error
res, err = newCloudMapResolver(awsCloudMapLogger, &oCfg.Resolver.AWSCloudMap.NamespaceName, &oCfg.Resolver.AWSCloudMap.ServiceName, oCfg.Resolver.AWSCloudMap.Port, &oCfg.Resolver.AWSCloudMap.HealthStatus, oCfg.Resolver.AWSCloudMap.Interval, oCfg.Resolver.AWSCloudMap.Timeout)
if err != nil {
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/metadata.yaml
index 9918ca0e9b..d0077af9ca 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/metadata.yaml
@@ -25,3 +25,8 @@ tests:
- backend-3:4317
- backend-4:4317
expect_consumer_error: true
+ goleak:
+ ignore:
+ top:
+ # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information.
+ - "go.opencensus.io/stats/view.(*worker).start"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/resolver_aws_cloudmap.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/resolver_aws_cloudmap.go
index 21d1410701..ea316df105 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/resolver_aws_cloudmap.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter/resolver_aws_cloudmap.go
@@ -27,7 +27,7 @@ const (
var (
errNoNamespace = errors.New("no Cloud Map namespace specified to resolve the backends")
- errNoServiceName = errors.New("no Cloud Map serviceName specified to resolve the backends")
+ errNoServiceName = errors.New("no Cloud Map service_name specified to resolve the backends")
awsResolverMutator = tag.Upsert(tag.MustNewKey("resolver"), "aws")
@@ -115,10 +115,10 @@ func (r *cloudMapResolver) start(ctx context.Context) error {
go r.periodicallyResolve()
r.logger.Info("AWS CloudMap resolver started",
- zap.Stringp("serviceName", r.serviceName),
+ zap.Stringp("service_name", r.serviceName),
zap.Stringp("namespaceName", r.namespaceName),
zap.Uint16p("port", r.port),
- zap.String("healthStatus", string(*r.healthStatus)),
+ zap.String("health_status", string(*r.healthStatus)),
zap.Duration("interval", r.resInterval), zap.Duration("timeout", r.resTimeout))
return nil
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/exporter.go
index 2b00d86e57..9ce04c26ef 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/exporter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/exporter.go
@@ -113,7 +113,7 @@ func newLogzioLogsExporter(config *Config, set exporter.CreateSettings) (exporte
}
func (exporter *logzioExporter) start(ctx context.Context, host component.Host) error {
- client, err := exporter.config.ClientConfig.ToClientContext(ctx, host, exporter.settings)
+ client, err := exporter.config.ClientConfig.ToClient(ctx, host, exporter.settings)
if err != nil {
return err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/internal/metadata/generated_status.go
index 0a0ec9b101..d808141b06 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -16,11 +14,3 @@ const (
TracesStability = component.StabilityLevelBeta
LogsStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/logzio")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/logzio")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..a7af4b2d8b
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/logzio")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/logzio")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/collector.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/collector.go
index eeaced19fc..8a243c01a9 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/collector.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/collector.go
@@ -18,10 +18,6 @@ import (
prometheustranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
)
-const (
- targetMetricName = "target_info"
-)
-
var (
separatorString = string([]byte{model.SeparatorByte})
)
@@ -56,11 +52,11 @@ func convertExemplars(exemplars pmetric.ExemplarSlice) []prometheus.Exemplar {
exemplarLabels := make(prometheus.Labels, 0)
if traceID := e.TraceID(); !traceID.IsEmpty() {
- exemplarLabels["trace_id"] = hex.EncodeToString(traceID[:])
+ exemplarLabels[prometheustranslator.ExemplarTraceIDKey] = hex.EncodeToString(traceID[:])
}
if spanID := e.SpanID(); !spanID.IsEmpty() {
- exemplarLabels["span_id"] = hex.EncodeToString(spanID[:])
+ exemplarLabels[prometheustranslator.ExemplarSpanIDKey] = hex.EncodeToString(spanID[:])
}
var value float64
@@ -147,7 +143,12 @@ func (c *collector) convertGauge(metric pmetric.Metric, resourceAttrs pcommon.Ma
case pmetric.NumberDataPointValueTypeDouble:
value = ip.DoubleValue()
}
- m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, value, attributes...)
+ metricType := prometheus.GaugeValue
+ originalType, ok := metric.Metadata().Get(prometheustranslator.MetricMetadataTypeKey)
+ if ok && originalType.Str() == string(model.MetricTypeUnknown) {
+ metricType = prometheus.UntypedValue
+ }
+ m, err := prometheus.NewConstMetric(desc, metricType, value, attributes...)
if err != nil {
return nil, err
}
@@ -327,7 +328,7 @@ func (c *collector) createTargetInfoMetrics(resourceAttrs []pcommon.Map) ([]prom
labels[model.InstanceLabel] = instance
}
- name := targetMetricName
+ name := prometheustranslator.TargetInfoMetricName
if len(c.namespace) > 0 {
name = c.namespace + "_" + name
}
@@ -365,7 +366,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
targetMetrics, err := c.createTargetInfoMetrics(resourceAttrs)
if err != nil {
- c.logger.Error(fmt.Sprintf("failed to convert metric %s: %s", targetMetricName, err.Error()))
+ c.logger.Error(fmt.Sprintf("failed to convert metric %s: %s", prometheustranslator.TargetInfoMetricName, err.Error()))
}
for _, m := range targetMetrics {
ch <- m
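The collector change above maps gauges whose original type metadata is `unknown` to Prometheus untyped samples. A hedged standalone sketch of that mapping, with an illustrative metric name:

```go
// Sketch only: chooses UntypedValue when the recorded original type is "unknown",
// otherwise keeps GaugeValue, mirroring the convertGauge change above.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
)

func constMetricFor(originalType string, value float64) (prometheus.Metric, error) {
	desc := prometheus.NewDesc("demo_metric", "illustrative metric", nil, nil)
	valueType := prometheus.GaugeValue
	if originalType == string(model.MetricTypeUnknown) {
		valueType = prometheus.UntypedValue
	}
	return prometheus.NewConstMetric(desc, valueType, value)
}

func main() {
	m, err := constMetricFor(string(model.MetricTypeUnknown), 42)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Desc())
}
```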
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/internal/metadata/generated_status.go
index 53a6268cd7..ec8e6dd593 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
MetricsStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/prometheus")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/prometheus")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..4dc34b4367
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/prometheus")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/prometheus")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/metadata.yaml
index 362bc5ad66..6ed4fc5ef5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/metadata.yaml
@@ -14,3 +14,8 @@ status:
tests:
config:
endpoint: "127.0.0.1:1234"
+ goleak:
+ ignore:
+ top:
+ # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information.
+ - "go.opencensus.io/stats/view.(*worker).start"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/prometheus.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/prometheus.go
index 9309bd54be..f8efe36e22 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/prometheus.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter/prometheus.go
@@ -58,7 +58,7 @@ func newPrometheusExporter(config *Config, set exporter.CreateSettings) (*promet
}
func (pe *prometheusExporter) Start(ctx context.Context, host component.Host) error {
- ln, err := pe.config.ToListenerContext(ctx)
+ ln, err := pe.config.ToListener(ctx)
if err != nil {
return err
}
@@ -67,7 +67,7 @@ func (pe *prometheusExporter) Start(ctx context.Context, host component.Host) er
mux := http.NewServeMux()
mux.Handle("/metrics", pe.handler)
- srv, err := pe.config.ToServerContext(ctx, host, pe.settings, mux)
+ srv, err := pe.config.ToServer(ctx, host, pe.settings, mux)
if err != nil {
return err
}
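
The `ToListenerContext`/`ToServerContext` to `ToListener`/`ToServer` rename recurs in several components in this change (health_check, SignalFx, the ECS metadata client). Below is a rough, hedged sketch of the new call pattern only, assuming `confighttp.ServerConfig` as the config type and the collector's `componenttest` nop helpers; real components wire this into their `Start`/`Shutdown` lifecycle instead.

```go
package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/config/confighttp"
)

// startHTTP starts an HTTP server from a confighttp server config, using the
// ToListener/ToServer helpers that the call sites in this diff migrate to.
func startHTTP(ctx context.Context, cfg confighttp.ServerConfig, host component.Host, set component.TelemetrySettings, handler http.Handler) (*http.Server, error) {
	ln, err := cfg.ToListener(ctx)
	if err != nil {
		return nil, err
	}
	srv, err := cfg.ToServer(ctx, host, set, handler)
	if err != nil {
		return nil, err
	}
	go func() {
		// Serve returns http.ErrServerClosed after a graceful Shutdown/Close.
		_ = srv.Serve(ln)
	}()
	return srv, nil
}

func main() {
	cfg := confighttp.ServerConfig{Endpoint: "localhost:0"}
	srv, err := startHTTP(context.Background(), cfg, componenttest.NewNopHost(), componenttest.NewNopTelemetrySettings(), http.NewServeMux())
	if err != nil {
		panic(err)
	}
	_ = srv.Close()
}
```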
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/README.md
index f48f3e9d00..33ae05e160 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/README.md
@@ -100,6 +100,13 @@ Several helper files are leveraged to provide additional capabilities automatica
- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)
- [Retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md), note that the exporter doesn't support `sending_queue` but provides `remote_write_queue`.
+### Feature gates
+This exporter has a feature gate: `exporter.prometheusremotewritexporter.RetryOn429`.
+When this feature gate is enabled, the Prometheus remote write exporter retries requests that receive an HTTP 429 status code, using the configured retry settings.
+It currently doesn't respect the `Retry-After` HTTP header, if provided, because the retry library in use doesn't support it.
+
+To enable the gate, run the collector with one additional parameter: `--feature-gates=exporter.prometheusremotewritexporter.RetryOn429`.
+
## Metric names and labels normalization
OpenTelemetry metric names and attributes are normalized to be compliant with Prometheus naming rules. [Details on this normalization process are described in the Prometheus translator module](../../pkg/translator/prometheus/).
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/documentation.md
new file mode 100644
index 0000000000..dc8d0cdf59
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/documentation.md
@@ -0,0 +1,23 @@
+[comment]: <> (Code generated by mdatagen. DO NOT EDIT.)
+
+# prometheusremotewrite
+
+## Internal Telemetry
+
+The following telemetry is emitted by this component.
+
+### exporter_prometheusremotewrite_failed_translations
+
+Number of translation operations that failed to translate metrics from Otel to Prometheus
+
+| Unit | Metric Type | Value Type | Monotonic |
+| ---- | ----------- | ---------- | --------- |
+| 1 | Sum | Int | true |
+
+### exporter_prometheusremotewrite_translated_time_series
+
+Number of Prometheus time series that were translated from OTel metrics
+
+| Unit | Metric Type | Value Type | Monotonic |
+| ---- | ----------- | ---------- | --------- |
+| 1 | Sum | Int | true |
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/exporter.go
index 52ad2ecd1f..1e2c22908a 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/exporter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/exporter.go
@@ -41,17 +41,16 @@ type prwTelemetry interface {
}
type prwTelemetryOtel struct {
- failedTranslations metric.Int64Counter
- translatedTimeSeries metric.Int64Counter
- otelAttrs []attribute.KeyValue
+ telemetryBuilder *metadata.TelemetryBuilder
+ otelAttrs []attribute.KeyValue
}
func (p *prwTelemetryOtel) recordTranslationFailure(ctx context.Context) {
- p.failedTranslations.Add(ctx, 1, metric.WithAttributes(p.otelAttrs...))
+ p.telemetryBuilder.ExporterPrometheusremotewriteFailedTranslations.Add(ctx, 1, metric.WithAttributes(p.otelAttrs...))
}
func (p *prwTelemetryOtel) recordTranslatedTimeSeries(ctx context.Context, numTS int) {
- p.translatedTimeSeries.Add(ctx, int64(numTS), metric.WithAttributes(p.otelAttrs...))
+ p.telemetryBuilder.ExporterPrometheusremotewriteTranslatedTimeSeries.Add(ctx, int64(numTS), metric.WithAttributes(p.otelAttrs...))
}
// prwExporter converts OTLP metrics to Prometheus remote write TimeSeries and sends them to a remote endpoint.
@@ -66,33 +65,24 @@ type prwExporter struct {
clientSettings *confighttp.ClientConfig
settings component.TelemetrySettings
retrySettings configretry.BackOffConfig
+ retryOnHTTP429 bool
wal *prweWAL
exporterSettings prometheusremotewrite.Settings
telemetry prwTelemetry
}
func newPRWTelemetry(set exporter.CreateSettings) (prwTelemetry, error) {
-
- meter := metadata.Meter(set.TelemetrySettings)
- // TODO: create helper functions similar to the processor helper: BuildCustomMetricName
- prefix := "exporter/" + metadata.Type.String() + "/"
- failedTranslations, errFailedTranslation := meter.Int64Counter(prefix+"failed_translations",
- metric.WithDescription("Number of translation operations that failed to translate metrics from Otel to Prometheus"),
- metric.WithUnit("1"),
- )
-
- translatedTimeSeries, errTranslatedMetrics := meter.Int64Counter(prefix+"translated_time_series",
- metric.WithDescription("Number of Prometheus time series that were translated from OTel metrics"),
- metric.WithUnit("1"),
- )
+ telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
+ if err != nil {
+ return nil, err
+ }
return &prwTelemetryOtel{
- failedTranslations: failedTranslations,
- translatedTimeSeries: translatedTimeSeries,
+ telemetryBuilder: telemetryBuilder,
otelAttrs: []attribute.KeyValue{
attribute.String("exporter", set.ID.String()),
},
- }, errors.Join(errFailedTranslation, errTranslatedMetrics)
+ }, nil
}
// newPRWExporter initializes a new prwExporter instance and sets fields accordingly.
@@ -124,6 +114,7 @@ func newPRWExporter(cfg *Config, set exporter.CreateSettings) (*prwExporter, err
clientSettings: &cfg.ClientConfig,
settings: set.TelemetrySettings,
retrySettings: cfg.BackOffConfig,
+ retryOnHTTP429: retryOn429FeatureGate.IsEnabled(),
exporterSettings: prometheusremotewrite.Settings{
Namespace: cfg.Namespace,
ExternalLabels: sanitizedLabels,
@@ -141,7 +132,7 @@ func newPRWExporter(cfg *Config, set exporter.CreateSettings) (*prwExporter, err
// Start creates the prometheus client
func (prwe *prwExporter) Start(ctx context.Context, host component.Host) (err error) {
- prwe.client, err = prwe.clientSettings.ToClientContext(ctx, host, prwe.settings)
+ prwe.client, err = prwe.clientSettings.ToClient(ctx, host, prwe.settings)
if err != nil {
return err
}
@@ -329,6 +320,13 @@ func (prwe *prwExporter) execute(ctx context.Context, writeReq *prompb.WriteRequ
if resp.StatusCode >= 500 && resp.StatusCode < 600 {
return rerr
}
+
+ // 429 errors are recoverable and the exporter should retry if RetryOnHTTP429 is enabled
+ // Reference: https://github.com/prometheus/prometheus/pull/12677
+ if prwe.retryOnHTTP429 && resp.StatusCode == 429 {
+ return rerr
+ }
+
return backoff.Permanent(consumererror.NewPermanent(rerr))
}
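
The new 429 branch above returns the raw error so the surrounding backoff-driven retry keeps going, while everything else is wrapped in `backoff.Permanent` to stop retries. Below is a standalone, hedged sketch of that distinction using the same `cenkalti/backoff/v4` primitives; the status codes, messages, and fake operation are illustrative only.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff/v4"
)

// classify returns a retryable error for 5xx (and, when the gate is on, 429),
// and wraps everything else in backoff.Permanent so the retry loop stops.
func classify(status int, retryOn429 bool) error {
	if status >= 200 && status < 300 {
		return nil
	}
	err := fmt.Errorf("remote write returned status %d", status)
	if status >= 500 && status < 600 {
		return err
	}
	if retryOn429 && status == http.StatusTooManyRequests {
		return err
	}
	return backoff.Permanent(err)
}

func main() {
	attempts := 0
	op := func() error {
		attempts++
		// Pretend the first two attempts are throttled, then the write succeeds.
		if attempts < 3 {
			return classify(http.StatusTooManyRequests, true)
		}
		return classify(http.StatusOK, true)
	}
	if err := backoff.Retry(op, backoff.NewExponentialBackOff()); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
```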
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/factory.go
index d0a0f85555..b84fa7ce15 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/factory.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/factory.go
@@ -14,11 +14,19 @@ import (
"go.opentelemetry.io/collector/config/configretry"
"go.opentelemetry.io/collector/exporter"
"go.opentelemetry.io/collector/exporter/exporterhelper"
+ "go.opentelemetry.io/collector/featuregate"
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry"
)
+var retryOn429FeatureGate = featuregate.GlobalRegistry().MustRegister(
+ "exporter.prometheusremotewritexporter.RetryOn429",
+ featuregate.StageAlpha,
+ featuregate.WithRegisterFromVersion("v0.101.0"),
+ featuregate.WithRegisterDescription("When enabled, the Prometheus remote write exporter will retry 429 http status code. Requires exporter.prometheusremotewritexporter.metrics.RetryOn429 to be enabled."),
+)
+
// NewFactory creates a new Prometheus Remote Write exporter.
func NewFactory() exporter.Factory {
return exporter.NewFactory(
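
The gate registered in factory.go above follows the collector's usual register-once, check-at-construction pattern. For readers unfamiliar with feature gates, here is a hedged sketch of that pattern; the gate ID and description below are invented for illustration and are not real collector gates.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/featuregate"
)

// Gates are registered once in a package-level var, exactly as in factory.go
// above; this one is a stand-in so the sketch is self-contained.
var exampleGate = featuregate.GlobalRegistry().MustRegister(
	"example.exporter.RetryOn429",
	featuregate.StageAlpha,
	featuregate.WithRegisterDescription("Illustrative gate for this sketch only."),
)

func main() {
	// Components read the gate once at construction time (compare
	// retryOnHTTP429: retryOn429FeatureGate.IsEnabled() in exporter.go),
	// and operators flip it with --feature-gates on the collector command line.
	fmt.Println("retry on 429 enabled:", exampleGate.IsEnabled())
}
```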
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_status.go
index 1ad10c8ecf..6a7a98fa90 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
MetricsStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/prometheusremotewrite")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/prometheusremotewrite")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..2dc39557ca
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,71 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "errors"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+ "go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/config/configtelemetry"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/prometheusremotewrite")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/prometheusremotewrite")
+}
+
+// TelemetryBuilder provides an interface for components to report telemetry
+// as defined in metadata and user config.
+type TelemetryBuilder struct {
+ ExporterPrometheusremotewriteFailedTranslations metric.Int64Counter
+ ExporterPrometheusremotewriteTranslatedTimeSeries metric.Int64Counter
+ level configtelemetry.Level
+}
+
+// telemetryBuilderOption applies changes to default builder.
+type telemetryBuilderOption func(*TelemetryBuilder)
+
+// WithLevel sets the current telemetry level for the component.
+func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption {
+ return func(builder *TelemetryBuilder) {
+ builder.level = lvl
+ }
+}
+
+// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
+// for a component
+func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) {
+ builder := TelemetryBuilder{level: configtelemetry.LevelBasic}
+ for _, op := range options {
+ op(&builder)
+ }
+ var (
+ err, errs error
+ meter metric.Meter
+ )
+ if builder.level >= configtelemetry.LevelBasic {
+ meter = Meter(settings)
+ } else {
+ meter = noop.Meter{}
+ }
+ builder.ExporterPrometheusremotewriteFailedTranslations, err = meter.Int64Counter(
+ "exporter_prometheusremotewrite_failed_translations",
+ metric.WithDescription("Number of translation operations that failed to translate metrics from Otel to Prometheus"),
+ metric.WithUnit("1"),
+ )
+ errs = errors.Join(errs, err)
+ builder.ExporterPrometheusremotewriteTranslatedTimeSeries, err = meter.Int64Counter(
+ "exporter_prometheusremotewrite_translated_time_series",
+ metric.WithDescription("Number of Prometheus time series that were translated from OTel metrics"),
+ metric.WithUnit("1"),
+ )
+ errs = errors.Join(errs, err)
+ return &builder, errs
+}
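
Usage of the generated builder is then a two-step pattern: build it once from the component's telemetry settings, then record on the generated instruments, as `newPRWTelemetry` and the record methods in exporter.go above do. The sketch below is illustrative only: the helper name `recordBatch` and the attribute value are made up, and the `internal/metadata` import only resolves inside this module.

```go
package prometheusremotewriteexporter

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/internal/metadata"
)

// recordBatch is an illustrative helper (not part of the exporter) showing the
// generated counters being incremented with a component-identifying attribute.
func recordBatch(ctx context.Context, tb *metadata.TelemetryBuilder, translatedSeries int64, translationFailed bool) {
	attrs := metric.WithAttributes(attribute.String("exporter", "prometheusremotewrite/example"))
	if translationFailed {
		tb.ExporterPrometheusremotewriteFailedTranslations.Add(ctx, 1, attrs)
		return
	}
	tb.ExporterPrometheusremotewriteTranslatedTimeSeries.Add(ctx, translatedSeries, attrs)
}
```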
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/metadata.yaml
index 2bd409f979..492aaacc1d 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter/metadata.yaml
@@ -10,4 +10,21 @@ status:
active: [Aneurysm9, rapphil]
tests:
- expect_consumer_error: true
\ No newline at end of file
+ expect_consumer_error: true
+
+telemetry:
+ metrics:
+ exporter_prometheusremotewrite_failed_translations:
+ enabled: true
+ description: Number of translation operations that failed to translate metrics from Otel to Prometheus
+ unit: 1
+ sum:
+ value_type: int
+ monotonic: true
+ exporter_prometheusremotewrite_translated_time_series:
+ enabled: true
+ description: Number of Prometheus time series that were translated from OTel metrics
+ unit: 1
+ sum:
+ value_type: int
+ monotonic: true
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/exporter.go
index 878d0267fc..fb23755a07 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/exporter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/exporter.go
@@ -109,7 +109,7 @@ func (se *sapmExporter) pushTraceData(ctx context.Context, td ptrace.Traces) err
ingestResponse, err := se.client.ExportWithAccessTokenAndGetResponse(ctx, batches, accessToken)
if se.config.LogDetailedResponse && ingestResponse != nil {
if ingestResponse.Err != nil {
- se.logger.Debug("Failed to get response from trace ingest", zap.Error(ingestResponse.Err))
+ se.logger.Error("Failed to get response from trace ingest", zap.Error(ingestResponse.Err))
} else {
se.logger.Debug("Detailed response from ingest", zap.ByteString("response", ingestResponse.Body))
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/internal/metadata/generated_status.go
index 17fa827679..61919e0e6d 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
TracesStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/sapm")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/sapm")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..f681ad9447
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/sapm")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/sapm")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/metadata.yaml
index 47ae147c81..91eb60ad91 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter/metadata.yaml
@@ -11,4 +11,9 @@ status:
tests:
- expect_consumer_error: true
\ No newline at end of file
+ expect_consumer_error: true
+ goleak:
+ ignore:
+ top:
+ # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information.
+ - "go.opencensus.io/stats/view.(*worker).start"
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/config.go
index d7b27608c9..808af0cf9f 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/config.go
@@ -150,7 +150,7 @@ type DimensionClientConfig struct {
Timeout time.Duration `mapstructure:"timeout"`
}
-func (cfg *Config) getMetricTranslator(logger *zap.Logger) (*translation.MetricTranslator, error) {
+func (cfg *Config) getMetricTranslator(logger *zap.Logger, done chan struct{}) (*translation.MetricTranslator, error) {
rules := defaultTranslationRules
if cfg.TranslationRules != nil {
// Previous way to disable default translation rules.
@@ -166,7 +166,7 @@ func (cfg *Config) getMetricTranslator(logger *zap.Logger) (*translation.MetricT
if cfg.DisableDefaultTranslationRules {
rules = []translation.Rule{}
}
- metricTranslator, err := translation.NewMetricTranslator(rules, cfg.DeltaTranslationTTL)
+ metricTranslator, err := translation.NewMetricTranslator(rules, cfg.DeltaTranslationTTL, done)
if err != nil {
return nil, fmt.Errorf("invalid \"%s\": %w", translationRulesConfigKey, err)
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/exporter.go
index c0d4cc3cd6..ce53b2d2e9 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/exporter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/exporter.go
@@ -72,7 +72,7 @@ func newSignalFxExporter(
return nil, errors.New("nil config")
}
- metricTranslator, err := config.getMetricTranslator(createSettings.TelemetrySettings.Logger)
+ metricTranslator, err := config.getMetricTranslator(createSettings.TelemetrySettings.Logger, make(chan struct{}))
if err != nil {
return nil, err
}
@@ -125,7 +125,7 @@ func (se *signalfxExporter) start(ctx context.Context, host component.Host) (err
sendOTLPHistograms: se.config.SendOTLPHistograms,
}
- apiTLSCfg, err := se.config.APITLSSettings.LoadTLSConfig()
+ apiTLSCfg, err := se.config.APITLSSettings.LoadTLSConfig(ctx)
if err != nil {
return fmt.Errorf("could not load API TLS config: %w", err)
}
@@ -218,7 +218,7 @@ func (se *signalfxExporter) startLogs(ctx context.Context, host component.Host)
func (se *signalfxExporter) createClient(ctx context.Context, host component.Host) (*http.Client, error) {
se.config.ClientConfig.TLSSetting = se.config.IngestTLSSettings
- return se.config.ToClientContext(ctx, host, se.telemetrySettings)
+ return se.config.ToClient(ctx, host, se.telemetrySettings)
}
func (se *signalfxExporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error {
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/correlation/correlation.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/correlation/correlation.go
index 29c7ab7835..38ce7dd097 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/correlation/correlation.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/correlation/correlation.go
@@ -56,7 +56,7 @@ func newCorrelationClient(ctx context.Context, cfg *Config, accessToken configop
return nil, fmt.Errorf("failed to parse correlation endpoint URL %q: %w", cfg.ClientConfig.Endpoint, err)
}
- httpClient, err := cfg.ToClientContext(ctx, host, params.TelemetrySettings)
+ httpClient, err := cfg.ToClient(ctx, host, params.TelemetrySettings)
if err != nil {
return nil, fmt.Errorf("failed to create correlation API client: %w", err)
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/metadata/generated_status.go
index 574ac42ee3..c33c58f337 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -17,11 +15,3 @@ const (
MetricsStability = component.StabilityLevelBeta
LogsStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/signalfx")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/signalfx")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..e8c0535e27
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/signalfx")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/signalfx")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/delta_translator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/delta_translator.go
index d2cc10a417..26f19c06a4 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/delta_translator.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/delta_translator.go
@@ -14,12 +14,12 @@ type deltaTranslator struct {
prevPts *ttlmap.TTLMap
}
-func newDeltaTranslator(ttl int64) *deltaTranslator {
+func newDeltaTranslator(ttl int64, done chan struct{}) *deltaTranslator {
sweepIntervalSeconds := ttl / 2
if sweepIntervalSeconds == 0 {
sweepIntervalSeconds = 1
}
- m := ttlmap.New(sweepIntervalSeconds, ttl)
+ m := ttlmap.New(sweepIntervalSeconds, ttl, done)
m.Start()
return &deltaTranslator{prevPts: m}
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/translator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/translator.go
index 528ef7c264..b899de0dc8 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/translator.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/translator.go
@@ -226,7 +226,7 @@ type MetricTranslator struct {
deltaTranslator *deltaTranslator
}
-func NewMetricTranslator(rules []Rule, ttl int64) (*MetricTranslator, error) {
+func NewMetricTranslator(rules []Rule, ttl int64, done chan struct{}) (*MetricTranslator, error) {
err := validateTranslationRules(rules)
if err != nil {
return nil, err
@@ -240,7 +240,7 @@ func NewMetricTranslator(rules []Rule, ttl int64) (*MetricTranslator, error) {
return &MetricTranslator{
rules: rules,
dimensionsMap: createDimensionsMap(rules),
- deltaTranslator: newDeltaTranslator(ttl),
+ deltaTranslator: newDeltaTranslator(ttl, done),
}, nil
}
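
The `done` channel threaded through `NewMetricTranslator`, `newDeltaTranslator`, and `ttlmap.New` is the usual stop signal for a background sweeper goroutine, so lifecycle (goleak) tests can verify the goroutine exits. Below is a generic, hedged sketch of that pattern, not the actual `ttlmap` implementation.

```go
package main

import (
	"fmt"
	"time"
)

// startSweeper runs a periodic cleanup until done is closed. Closing done
// (typically from the component's Shutdown) stops the goroutine, which is
// what goleak-based lifecycle tests check for.
func startSweeper(interval time.Duration, sweep func(), done <-chan struct{}) {
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				sweep()
			case <-done:
				return
			}
		}
	}()
}

func main() {
	done := make(chan struct{})
	startSweeper(10*time.Millisecond, func() { fmt.Println("sweep expired entries") }, done)
	time.Sleep(35 * time.Millisecond)
	close(done) // stop the background goroutine
	time.Sleep(10 * time.Millisecond)
}
```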
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/metadata.yaml
index 7e11fe3224..fbc5bb90b4 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/metadata.yaml
@@ -18,3 +18,5 @@ tests:
retry_on_failure:
enabled: false
expect_consumer_error: true
+ goleak:
+ skip: true
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/internal/metadata/generated_status.go
index 839a546933..577abdfeba 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
ExtensionStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/awsproxy")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/awsproxy")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..3dcb08e7c3
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/awsproxy")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/awsproxy")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/metadata.yaml
index 591679a363..502d14e810 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy/metadata.yaml
@@ -11,3 +11,5 @@ status:
tests:
skip_lifecycle: true
+ goleak:
+ skip: true
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/healthcheckextension.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/healthcheckextension.go
index abebd3b485..91ce98b5ed 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/healthcheckextension.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/healthcheckextension.go
@@ -33,12 +33,12 @@ var _ extension.PipelineWatcher = (*healthCheckExtension)(nil)
func (hc *healthCheckExtension) Start(ctx context.Context, host component.Host) error {
hc.logger.Info("Starting health_check extension", zap.Any("config", hc.config))
- ln, err := hc.config.ToListenerContext(ctx)
+ ln, err := hc.config.ToListener(ctx)
if err != nil {
return fmt.Errorf("failed to bind to address %s: %w", hc.config.Endpoint, err)
}
- hc.server, err = hc.config.ToServerContext(ctx, host, hc.settings, nil)
+ hc.server, err = hc.config.ToServer(ctx, host, hc.settings, nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_status.go
index 64a946c0e4..dce6a55133 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
ExtensionStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/healthcheck")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/healthcheck")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..e193abbb6b
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/healthcheck")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/healthcheck")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/metadata.yaml
index 2286ebf7a3..1e200d1122 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension/metadata.yaml
@@ -12,3 +12,8 @@ status:
tests:
config:
endpoint: localhost:0
+ goleak:
+ ignore:
+ top:
+ # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information.
+ - "go.opencensus.io/stats/view.(*worker).start"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/internal/metadata/generated_status.go
index 4f90e8eaf2..0e66f707d1 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
ExtensionStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/ecsobserver")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/ecsobserver")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..33ae3d3994
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/ecsobserver")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/ecsobserver")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension/internal/metadata/generated_status.go
index 7d86497380..9221e62c17 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
ExtensionStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/pprof")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/pprof")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..96965e8c63
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/pprof")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/pprof")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension/internal/metadata/generated_status.go
index b05c500dcb..eba38fdc5f 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
ExtensionStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/sigv4auth")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/sigv4auth")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..5b02f319a5
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/sigv4auth")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/sigv4auth")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/README.md
index 3a95f87ab2..ee41b647fb 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/README.md
@@ -34,6 +34,10 @@ The default timeout is `1s`.
`compaction.max_transaction_size` (default: 65536): defines maximum size of the compaction transaction.
A value of zero will ignore transaction sizes.
+`compaction.cleanup_on_start` (default: false) - specifies whether removal of compaction temporary files is performed on start.
+When enabled, all temporary files in the compaction directory (those whose names start with `tempdb`) are removed on start.
+Such files can be left behind if a previous run of the process was killed while compacting.
+
### Rebound (online) compaction
For rebound compaction, there are two additional parameters available:
@@ -96,40 +100,48 @@ exporters:
nop:
```
-## Feature Gates
-
-See the [Collector feature gates](https://github.com/open-telemetry/opentelemetry-collector/blob/main/featuregate/README.md#collector-feature-gates) for an overview of feature gates in the collector.
-
-### `extension.filestorage.replaceUnsafeCharacters`
+## Replacing unsafe characters in component names
-When enabled, characters that are not safe in file names are replaced in component name using the extension before creating the file name to store data by the extension.
+The extension uses the type and name of the component using the extension to create a file where the component's data is stored.
+For example, if a Filelog receiver named `filelog/logs` uses the extension, its data is stored in a file named `receiver_filelog_logs`.
-For example, for a Filelog receiver named `filelog/logs/json`, the data is stored:
+Sometimes the component name contains characters that either have special meaning in paths - like `/` - or are problematic or even forbidden in file names (depending on the host operating system), like `?` or `|`.
+To prevent surprising or erroneous behavior, some characters in the component name are replaced before the extension creates the file name used to store the data.
-- in path `receiver_filelog_logs/json` with the feature flag disabled (note that this is a file named `json` inside directory named `receiver_filelog_logs`).
-- in file `receiver_filelog_logs~007Ejson` with the feature flag enabled.
+For example, for a Filelog receiver named `filelog/logs/container`, the component name `logs/container` is sanitized into `logs~002Fcontainer` and the data is stored in a file named `receiver_filelog_logs~002Fcontainer`.
-This replacement is done to prevent surprising behavior or errors in the File Storage extension.
-
-The feature replaces all usafe characters with a tilde `~` and the character's [Unicode number][unicode_chars] in hex.
+Every unsafe character is replaced with a tilde `~` and the character's [Unicode number][unicode_chars] in hex.
The only safe characters are: uppercase and lowercase ASCII letters `A-Z` and `a-z`, digits `0-9`, dot `.`, hyphen `-`, underscore `_`.
+The tilde `~` character is also replaced even though it is a safe character, to make sure that the sanitized component name never overlaps with a component name that does not require sanitization.
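
Below is a hedged sketch of that replacement scheme: illustrative code consistent with the description above and with the `sanitize`/`isSafe` helpers touched later in this diff, not the extension's code verbatim.

```go
package main

import (
	"fmt"
	"strings"
)

// isSafe reports whether a rune may appear unescaped in the stored file name.
func isSafe(r rune) bool {
	switch {
	case r >= 'a' && r <= 'z',
		r >= 'A' && r <= 'Z',
		r >= '0' && r <= '9',
		r == '.', r == '-', r == '_':
		return true
	}
	return false
}

// sanitize replaces every other rune (including '~' itself) with '~' followed
// by the rune's Unicode code point in four-digit uppercase hex.
func sanitize(name string) string {
	var b strings.Builder
	for _, r := range name {
		if isSafe(r) {
			b.WriteRune(r)
			continue
		}
		b.WriteString(fmt.Sprintf("~%04X", r))
	}
	return b.String()
}

func main() {
	fmt.Println(sanitize("logs/container")) // logs~002Fcontainer
}
```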
-Changing the state of this feature gate may change the path to the file that the extension is writing component's data to. This may lead to loss of the data stored in the original path.
-
-Before enabling this feature gate, ideally make sure that all component names that use the File Storage extension have names that only contain safe characters.
-In case you want to keep using unsafe characters in your component names, you may want to rename the files used for storage before enabling this feature gate.
-For example, `mv ./receiver_filelog_logs/json ./receiver_filelog_logs~007Ejson`.
-
-For more details, see the following issues:
-
-- [File storage extension - invalid file name characters must be encoded #3148](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/3148)
-- [[filestorage] receiver name sanitization #20731](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/20731)
-
-The schedule for this feature gate is:
+[unicode_chars]: https://en.wikipedia.org/wiki/List_of_Unicode_characters
-- Introduced in v0.87.0 (October 2023) as `alpha` - disabled by default.
-- Moved to `beta` in v0.92.0 (January 2024) - enabled by default.
-- Moved to `stable` in April 2024 - cannot be disabled.
-- Removed three releases after `stable`.
-[unicode_chars]: https://en.wikipedia.org/wiki/List_of_Unicode_characters
+## Troubleshooting
+
+_Currently, the File Storage extension uses [bbolt](https://github.com/etcd-io/bbolt) to store and read data on disk. The
+following troubleshooting method works for bbolt-managed files. As such, there is no guarantee that this method will continue to work in the future, particularly if the extension switches away from bbolt._
+
+When troubleshooting components that use the File Storage extension, it is sometimes helpful to read the raw contents of
+files created by the extension for the component. The simplest way to read files
+created by the File Storage extension is to use the strings utility ([Linux](https://linux.die.net/man/1/strings),
+[Windows](https://learn.microsoft.com/en-us/sysinternals/downloads/strings)).
+
+For example, here are the contents of the file created by the File Storage extension when it's configured as the storage
+for the `filelog` receiver.
+
+```sh
+$ strings /tmp/otelcol/file_storage/filelogreceiver/receiver_filelog_
+default
+file_input.knownFiles2
+{"Fingerprint":{"first_bytes":"MzEwNzkKMjE5Cg=="},"Offset":10,"FileAttributes":{"log.file.name":"1.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:18.164331-07:00","LastDataLength":0}}
+{"Fingerprint":{"first_bytes":"MjQ0MDMK"},"Offset":6,"FileAttributes":{"log.file.name":"2.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:39.96429-07:00","LastDataLength":0}}
+default
+file_input.knownFiles2
+{"Fingerprint":{"first_bytes":"MzEwNzkKMjE5Cg=="},"Offset":10,"FileAttributes":{"log.file.name":"1.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:18.164331-07:00","LastDataLength":0}}
+{"Fingerprint":{"first_bytes":"MjQ0MDMK"},"Offset":6,"FileAttributes":{"log.file.name":"2.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:39.96429-07:00","LastDataLength":0}}
+default
+file_input.knownFiles2
+{"Fingerprint":{"first_bytes":"MzEwNzkKMjE5Cg=="},"Offset":10,"FileAttributes":{"log.file.name":"1.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:18.164331-07:00","LastDataLength":0}}
+{"Fingerprint":{"first_bytes":"MjQ0MDMK"},"Offset":6,"FileAttributes":{"log.file.name":"2.log"},"HeaderFinalized":false,"FlushState":{"LastDataChange":"2024-03-20T18:16:39.96429-07:00","LastDataLength":0}}
+```
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/client.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/client.go
index c8fca4ba02..b97cad73c0 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/client.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/client.go
@@ -20,6 +20,8 @@ import (
var defaultBucket = []byte(`default`)
const (
+ TempDbPrefix = "tempdb"
+
elapsedKey = "elapsed"
directoryKey = "directory"
tempDirectoryKey = "tempDirectory"
@@ -152,7 +154,7 @@ func (c *fileStorageClient) Compact(compactionDirectory string, timeout time.Dur
var compactedDb *bbolt.DB
// create temporary file in compactionDirectory
- file, err = os.CreateTemp(compactionDirectory, "tempdb")
+ file, err = os.CreateTemp(compactionDirectory, TempDbPrefix)
if err != nil {
return err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/config.go
index d71bbe0234..19e288a765 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/config.go
@@ -45,6 +45,10 @@ type CompactionConfig struct {
MaxTransactionSize int64 `mapstructure:"max_transaction_size,omitempty"`
// CheckInterval specifies frequency of compaction check
CheckInterval time.Duration `mapstructure:"check_interval,omitempty"`
+ // CleanupOnStart specifies whether removal of temporary files is performed on start.
+ // It removes all files in the compaction directory whose names start with tempdb;
+ // such temporary files can be left behind if a previous run of the process was killed while compacting.
+ CleanupOnStart bool `mapstructure:"cleanup_on_start,omitempty"`
}
func (cfg *Config) Validate() error {
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/extension.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/extension.go
index 2b74c5fa3d..f9d0467acc 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/extension.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/extension.go
@@ -5,25 +5,18 @@ package filestorage // import "github.com/open-telemetry/opentelemetry-collector
import (
"context"
+ "errors"
"fmt"
+ "os"
"path/filepath"
"strings"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/extension"
"go.opentelemetry.io/collector/extension/experimental/storage"
- "go.opentelemetry.io/collector/featuregate"
"go.uber.org/zap"
)
-var replaceUnsafeCharactersFeatureGate = featuregate.GlobalRegistry().MustRegister(
- "extension.filestorage.replaceUnsafeCharacters",
- featuregate.StageBeta,
- featuregate.WithRegisterDescription("When enabled, characters that are not safe in file paths are replaced in component name using the extension. For example, the data for component `filelog/logs/json` will be stored in file `receiver_filelog_logs~007Ejson` and not in `receiver_filelog_logs/json`."),
- featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/3148"),
- featuregate.WithRegisterFromVersion("v0.87.0"),
-)
-
type localFileStorage struct {
cfg *Config
logger *zap.Logger
@@ -39,8 +32,11 @@ func newLocalFileStorage(logger *zap.Logger, config *Config) (extension.Extensio
}, nil
}
-// Start does nothing
+// Start runs cleanup if configured
func (lfs *localFileStorage) Start(context.Context, component.Host) error {
+ if lfs.cfg.Compaction.CleanupOnStart {
+ return lfs.cleanup(lfs.cfg.Compaction.Directory)
+ }
return nil
}
@@ -60,9 +56,7 @@ func (lfs *localFileStorage) GetClient(_ context.Context, kind component.Kind, e
rawName = fmt.Sprintf("%s_%s_%s_%s", kindString(kind), ent.Type(), ent.Name(), name)
}
- if replaceUnsafeCharactersFeatureGate.IsEnabled() {
- rawName = sanitize(rawName)
- }
+ rawName = sanitize(rawName)
absoluteName := filepath.Join(lfs.cfg.Directory, rawName)
client, err := newClient(lfs.logger, absoluteName, lfs.cfg.Timeout, lfs.cfg.Compaction, !lfs.cfg.FSync)
@@ -104,7 +98,7 @@ func sanitize(name string) string {
// https://en.wikipedia.org/wiki/List_of_Unicode_characters
// For example, the slash is replaced with "~002F", and the tilde itself is replaced with "~007E".
// We perform replacement on the tilde even though it is a safe character to make sure that the sanitized component name
- // never overlaps with a component name that does not reqire sanitization.
+ // never overlaps with a component name that does not require sanitization.
var sanitized strings.Builder
for _, character := range name {
if isSafe(character) {
@@ -134,3 +128,30 @@ func isSafe(character rune) bool {
}
return false
}
+
+// cleanup removes compaction temporary files left behind by a previously killed process
+func (lfs *localFileStorage) cleanup(compactionDirectory string) error {
+ pattern := filepath.Join(compactionDirectory, fmt.Sprintf("%s*", TempDbPrefix))
+ contents, err := filepath.Glob(pattern)
+ if err != nil {
+ lfs.logger.Info("cleanup error listing temporary files",
+ zap.Error(err))
+ return err
+ }
+
+ var errs []error
+ for _, item := range contents {
+ err = os.Remove(item)
+ if err == nil {
+ lfs.logger.Debug("cleanup",
+ zap.String("deletedFile", item))
+ } else {
+ errs = append(errs, err)
+ }
+ }
+ if errs != nil {
+ lfs.logger.Info("cleanup errors",
+ zap.Error(errors.Join(errs...)))
+ }
+ return nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/factory.go
index ef3e04e9d3..18178c54a4 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/factory.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/factory.go
@@ -45,6 +45,7 @@ func createDefaultConfig() component.Config {
ReboundNeededThresholdMiB: defaultReboundNeededThresholdMib,
ReboundTriggerThresholdMiB: defaultReboundTriggerThresholdMib,
CheckInterval: defaultCompactionInterval,
+ CleanupOnStart: false,
},
Timeout: time.Second,
FSync: false,
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_status.go
index 7f4eae03a9..f1831d98fa 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_status.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_status.go
@@ -4,8 +4,6 @@ package metadata
import (
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/trace"
)
var (
@@ -15,11 +13,3 @@ var (
const (
ExtensionStability = component.StabilityLevelBeta
)
-
-func Meter(settings component.TelemetrySettings) metric.Meter {
- return settings.MeterProvider.Meter("otelcol/filestorage")
-}
-
-func Tracer(settings component.TelemetrySettings) trace.Tracer {
- return settings.TracerProvider.Tracer("otelcol/filestorage")
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_telemetry.go
new file mode 100644
index 0000000000..6a98524544
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage/internal/metadata/generated_telemetry.go
@@ -0,0 +1,17 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Meter(settings component.TelemetrySettings) metric.Meter {
+ return settings.MeterProvider.Meter("otelcol/filestorage")
+}
+
+func Tracer(settings component.TelemetrySettings) trace.Tracer {
+ return settings.TracerProvider.Tracer("otelcol/filestorage")
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/client.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/client.go
index e558537560..613a12a439 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/client.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/client.go
@@ -61,7 +61,7 @@ func defaultClient(
host component.Host,
settings component.TelemetrySettings,
) (*clientImpl, error) {
- client, err := clientSettings.ToClientContext(ctx, host, settings)
+ client, err := clientSettings.ToClient(ctx, host, settings)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy/conn.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy/conn.go
index 24d48d6540..e0e850428b 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy/conn.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy/conn.go
@@ -205,7 +205,7 @@ type stsCalls struct {
getSTSCredsFromRegionEndpoint func(log *zap.Logger, sess *session.Session, region, roleArn string) *credentials.Credentials
}
-// getSTSCreds gets STS credentials first from the regional endpoint, then from the primary
+// getCreds gets STS credentials first from the regional endpoint, then from the primary
// region in the respective AWS partition if the regional endpoint is disabled.
func (s *stsCalls) getCreds(region string, roleArn string) (*credentials.Credentials, error) {
sess, err := session.NewSession()
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/ttlmap/ttl_map.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/ttlmap/ttl_map.go
index 9485feb4e5..5a0647702d 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/ttlmap/ttl_map.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/ttlmap/ttl_map.go
@@ -12,6 +12,7 @@ import (
type TTLMap struct {
md *ttlMapData
sweepInterval int64
+ done chan struct{}
}
// New creates a TTLMap. The sweepIntervalSeconds arg indicates how often
@@ -19,19 +20,28 @@ type TTLMap struct {
// entries can persist before getting evicted. Call Start() on the returned
// TTLMap to begin periodic sweeps which check for expiration and evict entries
// as needed.
-func New(sweepIntervalSeconds int64, maxAgeSeconds int64) *TTLMap {
+// done is the channel used to signal the sweep goroutine to stop its work.
+func New(sweepIntervalSeconds int64, maxAgeSeconds int64, done chan struct{}) *TTLMap {
return &TTLMap{
sweepInterval: sweepIntervalSeconds,
md: newTTLMapData(maxAgeSeconds),
+ done: done,
}
}
// Start starts periodic sweeps for expired entries in the underlying map.
func (m *TTLMap) Start() {
go func() {
- d := time.Duration(m.sweepInterval) * time.Second
- for now := range time.Tick(d) {
- m.md.sweep(now.Unix())
+ ticker := time.NewTicker(time.Duration(m.sweepInterval) * time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case now := <-ticker.C:
+ m.md.sweep(now.Unix())
+ case <-m.done:
+ return
+ }
}
}()
}
@@ -49,6 +59,12 @@ func (m *TTLMap) Get(k string) any {
return m.md.get(k)
}
+func (m *TTLMap) Shutdown() {
+ if m.done != nil {
+ close(m.done)
+ }
+}
+
type entry struct {
createTime int64
v any
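
The TTLMap change swaps `time.Tick`, whose ticker can never be released, for a `time.NewTicker` guarded by a `select` on a caller-owned `done` channel that `Shutdown` closes. A minimal sketch of that stoppable-sweeper pattern, independent of the ttlmap package:

```go
// Sketch of the ticker-plus-done shutdown pattern used by the updated TTLMap.
package main

import (
	"fmt"
	"time"
)

type sweeper struct {
	interval time.Duration
	done     chan struct{}
}

func (s *sweeper) start(sweep func(now int64)) {
	go func() {
		ticker := time.NewTicker(s.interval)
		defer ticker.Stop() // released on shutdown, unlike time.Tick
		for {
			select {
			case now := <-ticker.C:
				sweep(now.Unix())
			case <-s.done:
				return
			}
		}
	}()
}

func (s *sweeper) shutdown() { close(s.done) }

func main() {
	s := &sweeper{interval: 10 * time.Millisecond, done: make(chan struct{})}
	s.start(func(now int64) { fmt.Println("sweep at", now) })
	time.Sleep(50 * time.Millisecond)
	s.shutdown()
}
```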
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go
index 91df68be07..209c2cbc77 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go
@@ -14,7 +14,7 @@ import (
"golang.org/x/text/transform"
)
-// NewBasicConfig creates a new Encoding config
+// NewEncodingConfig creates a new Encoding config
func NewEncodingConfig() EncodingConfig {
return EncodingConfig{
Encoding: "utf-8",
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go
index 29c5207418..52170879a5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go
@@ -86,10 +86,10 @@ var ctimeSubstitutes = map[string]string{
// %S - Second as a zero-padded decimal number (00, 01, ..., 59)
// %L - Millisecond as a decimal number, zero-padded on the left (000, 001, ..., 999)
// %f - Microsecond as a decimal number, zero-padded on the left (000000, ..., 999999)
-// %s - Nanosecond as a decimal number, zero-padded on the left (000000, ..., 999999)
+// %s - Nanosecond as a decimal number, zero-padded on the left (00000000, ..., 99999999)
// %z - UTC offset in the form ±HHMM[SS[.ffffff]] or empty(+0000, -0400)
// %Z - Timezone name or abbreviation or empty (UTC, EST, CST)
-// %D, %x - Short MM/DD/YY date, equivalent to %m/%d/%y
+// %D, %x - Short MM/DD/YYYY date, equivalent to %m/%d/%y
// %F - Short YYYY-MM-DD date, equivalent to %Y-%m-%d
// %T, %X - ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S
// %r - 12-hour clock time (02:55:02 pm)
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/agent.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/agent.go
deleted file mode 100644
index 7bba14cad2..0000000000
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/agent.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package datadog // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog"
-
-import (
- "context"
- "net/http"
- "runtime"
- "sync"
- "time"
-
- pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
- "github.com/DataDog/datadog-agent/pkg/trace/agent"
- "github.com/DataDog/datadog-agent/pkg/trace/api"
- traceconfig "github.com/DataDog/datadog-agent/pkg/trace/config"
- "github.com/DataDog/datadog-agent/pkg/trace/stats"
- "github.com/DataDog/datadog-agent/pkg/trace/telemetry"
- "github.com/DataDog/datadog-agent/pkg/trace/timing"
- "github.com/DataDog/datadog-go/v5/statsd"
- "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics"
- "go.opentelemetry.io/collector/pdata/ptrace"
-)
-
-// TraceAgent specifies a minimal trace agent instance that is able to process traces and output stats.
-type TraceAgent struct {
- *agent.Agent
-
- // pchan specifies the channel that will be used to output Datadog Trace Agent API Payloads
- // resulting from ingested OpenTelemetry spans.
- pchan chan *api.Payload
-
- // wg waits for all goroutines to exit.
- wg sync.WaitGroup
-
- // exit signals the agent to shut down.
- exit chan struct{}
-}
-
-// newAgent creates a new unstarted traceagent using the given context. Call Start to start the traceagent.
-// The out channel will receive outoing stats payloads resulting from spans ingested using the Ingest method.
-func NewAgent(ctx context.Context, out chan *pb.StatsPayload, metricsClient statsd.ClientInterface, timingReporter timing.Reporter) *TraceAgent {
- return NewAgentWithConfig(ctx, traceconfig.New(), out, metricsClient, timingReporter)
-}
-
-// newAgentWithConfig creates a new traceagent with the given config cfg. Used in tests; use newAgent instead.
-func NewAgentWithConfig(ctx context.Context, cfg *traceconfig.AgentConfig, out chan *pb.StatsPayload, metricsClient statsd.ClientInterface, timingReporter timing.Reporter) *TraceAgent {
- // disable the HTTP receiver
- cfg.ReceiverPort = 0
- // set the API key to succeed startup; it is never used nor needed
- cfg.Endpoints[0].APIKey = "skip_check"
- // set the default hostname to the translator's placeholder; in the case where no hostname
- // can be deduced from incoming traces, we don't know the default hostname (because it is set
- // in the exporter). In order to avoid duplicating the hostname setting in the processor and
- // exporter, we use a placeholder and fill it in later (in the Datadog Exporter or Agent OTLP
- // Ingest). This gives a better user experience.
- cfg.Hostname = metrics.UnsetHostnamePlaceholder
- pchan := make(chan *api.Payload, 1000)
- a := agent.NewAgent(ctx, cfg, telemetry.NewNoopCollector(), metricsClient)
- // replace the Concentrator (the component which computes and flushes APM Stats from incoming
- // traces) with our own, which uses the 'out' channel.
- a.Concentrator = stats.NewConcentrator(cfg, out, time.Now(), metricsClient)
- // ...and the same for the ClientStatsAggregator; we don't use it here, but it is also a source
- // of stats which should be available to us.
- a.ClientStatsAggregator = stats.NewClientStatsAggregator(cfg, out, metricsClient)
- // lastly, start the OTLP receiver, which will be used to introduce ResourceSpans into the traceagent,
- // so that we can transform them to Datadog spans and receive stats.
- a.OTLPReceiver = api.NewOTLPReceiver(pchan, cfg, metricsClient, timingReporter)
- return &TraceAgent{
- Agent: a,
- exit: make(chan struct{}),
- pchan: pchan,
- }
-}
-
-// Start starts the traceagent, making it ready to ingest spans.
-func (p *TraceAgent) Start() {
- // we don't need to start the full agent, so we only start a set of minimal
- // components needed to compute stats:
- for _, starter := range []interface{ Start() }{
- p.Concentrator,
- p.ClientStatsAggregator,
- // we don't need the samplers' nor the processor's functionalities;
- // but they are used by the agent nevertheless, so they need to be
- // active and functioning.
- p.PrioritySampler,
- p.ErrorsSampler,
- p.NoPrioritySampler,
- p.EventProcessor,
- } {
- starter.Start()
- }
-
- p.goDrain()
- p.goProcess()
-}
-
-// Stop stops the traceagent, making it unable to ingest spans. Do not call Ingest after Stop.
-func (p *TraceAgent) Stop() {
- for _, stopper := range []interface{ Stop() }{
- p.Concentrator,
- p.ClientStatsAggregator,
- p.PrioritySampler,
- p.ErrorsSampler,
- p.NoPrioritySampler,
- p.EventProcessor,
- } {
- stopper.Stop()
- }
- close(p.exit)
- p.wg.Wait()
-}
-
-// goDrain drains the TraceWriter channel, ensuring it won't block. We don't need the traces,
-// nor do we have a running TraceWrite. We just want the outgoing stats.
-func (p *TraceAgent) goDrain() {
- p.wg.Add(1)
- go func() {
- defer p.wg.Done()
- for {
- select {
- case <-p.TraceWriter.In:
- // we don't write these traces anywhere; drain the channel
- case <-p.exit:
- return
- }
- }
- }()
-}
-
-// Ingest processes the given spans within the traceagent and outputs stats through the output channel
-// provided to newAgent. Do not call Ingest on an unstarted or stopped traceagent.
-func (p *TraceAgent) Ingest(ctx context.Context, traces ptrace.Traces) {
- rspanss := traces.ResourceSpans()
- for i := 0; i < rspanss.Len(); i++ {
- rspans := rspanss.At(i)
- p.OTLPReceiver.ReceiveResourceSpans(ctx, rspans, http.Header{})
- // ...the call transforms the OTLP Spans into a Datadog payload and sends the result
- // down the p.pchan channel
-
- }
-}
-
-// goProcesses runs the main loop which takes incoming payloads, processes them and generates stats.
-// It then picks up those stats and converts them to metrics.
-func (p *TraceAgent) goProcess() {
- for i := 0; i < runtime.NumCPU(); i++ {
- p.wg.Add(1)
- go func() {
- defer p.wg.Done()
- for {
- select {
- case payload := <-p.pchan:
- p.Process(payload)
- // ...the call processes the payload and outputs stats via the 'out' channel
- // provided to newAgent
- case <-p.exit:
- return
- }
- }
- }()
- }
-}
-
-var _ Ingester = (*TraceAgent)(nil)
-
-// An Ingester is able to ingest traces. Implemented by traceagent.
-type Ingester interface {
- // Start starts the ingester.
- Start()
-
- // Ingest ingests the set of traces.
- Ingest(ctx context.Context, traces ptrace.Traces)
-
- // Stop stops the ingester.
- Stop()
-}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/metadata.yaml
deleted file mode 100644
index 27c4ee13b9..0000000000
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/metadata.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-status:
- codeowners:
- active: [mx-psi, dineshg13]
- emeritus: [gbbr]
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go
index f73e9f8e8a..037b4888d4 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go
@@ -25,6 +25,16 @@ func Not[K any](matcher BoolExpr[K]) BoolExpr[K] {
return notMatcher[K]{matcher: matcher}
}
+type alwaysTrueMatcher[K any] struct{}
+
+func (alm alwaysTrueMatcher[K]) Eval(_ context.Context, _ K) (bool, error) {
+ return true, nil
+}
+
+func AlwaysTrue[K any]() BoolExpr[K] {
+ return alwaysTrueMatcher[K]{}
+}
+
type orMatcher[K any] struct {
matchers []BoolExpr[K]
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go
index 6324c8a35b..e4dad6ee93 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go
@@ -12,6 +12,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent"
)
@@ -111,3 +112,19 @@ func NewBoolExprForResource(conditions []string, functions map[string]ottl.Facto
c := ottlresource.NewConditionSequence(statements, set, ottlresource.WithConditionSequenceErrorMode(errorMode))
return &c, nil
}
+
+// NewBoolExprForScope creates a BoolExpr[ottlscope.TransformContext] that will return true if any of the given OTTL conditions evaluate to true.
+// The passed-in functions should use the ottlscope.TransformContext.
+// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected
+func NewBoolExprForScope(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlscope.TransformContext], error) {
+ parser, err := ottlscope.NewParser(functions, set)
+ if err != nil {
+ return nil, err
+ }
+ statements, err := parser.ParseConditions(conditions)
+ if err != nil {
+ return nil, err
+ }
+ c := ottlscope.NewConditionSequence(statements, set, ottlscope.WithConditionSequenceErrorMode(errorMode))
+ return &c, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go
index c86ee64f89..c3ce56ce4a 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go
@@ -14,6 +14,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
@@ -40,6 +41,10 @@ func StandardDataPointFuncs() map[string]ottl.Factory[ottldatapoint.TransformCon
return ottlfuncs.StandardConverters[ottldatapoint.TransformContext]()
}
+func StandardScopeFuncs() map[string]ottl.Factory[ottlscope.TransformContext] {
+ return ottlfuncs.StandardConverters[ottlscope.TransformContext]()
+}
+
func StandardLogFuncs() map[string]ottl.Factory[ottllog.TransformContext] {
return ottlfuncs.StandardConverters[ottllog.TransformContext]()
}
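
Together, `NewBoolExprForScope` and `StandardScopeFuncs` let a component filter on instrumentation scopes the same way it already can for spans, metrics and logs. A hedged usage sketch follows; note that `filterottl` is an internal package, so this only compiles inside the contrib module, and the condition string and scope name are invented for illustration.

```go
// Hedged sketch: evaluate an OTTL condition against an instrumentation scope.
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope"
)

func main() {
	expr, err := filterottl.NewBoolExprForScope(
		[]string{`name == "my.library"`},
		filterottl.StandardScopeFuncs(),
		ottl.IgnoreError,
		componenttest.NewNopTelemetrySettings(),
	)
	if err != nil {
		panic(err)
	}

	scope := pcommon.NewInstrumentationScope()
	scope.SetName("my.library")
	matched, err := expr.Eval(context.Background(), ottlscope.NewTransformContext(scope, pcommon.NewResource()))
	fmt.Println(matched, err) // expected: true <nil>
}
```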
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig/config.go
index b1ef343d97..578a6fe079 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig/config.go
@@ -4,16 +4,24 @@
package k8sconfig // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig"
import (
+ "context"
"fmt"
"net"
"net/http"
"os"
+ "time"
quotaclientset "github.com/openshift/client-go/quota/clientset/versioned"
+ api_v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/runtime"
k8sruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
k8s "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
)
@@ -179,3 +187,25 @@ func MakeOpenShiftQuotaClient(apiConf APIConfig) (quotaclientset.Interface, erro
return client, nil
}
+
+func NewNodeSharedInformer(client k8s.Interface, nodeName string, watchSyncPeriod time.Duration) cache.SharedInformer {
+ informer := cache.NewSharedInformer(
+ &cache.ListWatch{
+ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+ if nodeName != "" {
+ opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", nodeName).String()
+ }
+ return client.CoreV1().Nodes().List(context.Background(), opts)
+ },
+ WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+ if nodeName != "" {
+ opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", nodeName).String()
+ }
+ return client.CoreV1().Nodes().Watch(context.Background(), opts)
+ },
+ },
+ &api_v1.Node{},
+ watchSyncPeriod,
+ )
+ return informer
+}
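
`NewNodeSharedInformer` scopes the informer's list and watch calls to a single node by setting a `metadata.name` field selector, so the collector does not receive events for every node in the cluster. A small sketch of how that selector is built; the node name is a hypothetical value:

```go
// Sketch of the metadata.name field selector the node informer relies on.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	nodeName := "ip-10-0-0-1.ec2.internal" // hypothetical node name

	opts := metav1.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("metadata.name", nodeName).String(),
	}
	// The informer applies the same options to both List and Watch, so the
	// API server only returns (and streams events for) this one node object.
	fmt.Println(opts.FieldSelector) // metadata.name=ip-10-0-0-1.ec2.internal
}
```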
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go
index a05edc6486..104d6152a1 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go
@@ -4,6 +4,7 @@
package kafka // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka"
import (
+ "context"
"crypto/sha256"
"crypto/sha512"
"fmt"
@@ -135,7 +136,7 @@ func configureSASL(config SASLConfig, saramaConfig *sarama.Config) error {
}
func configureTLS(config configtls.ClientConfig, saramaConfig *sarama.Config) error {
- tlsConfig, err := config.LoadTLSConfig()
+ tlsConfig, err := config.LoadTLSConfig(context.Background())
if err != nil {
return fmt.Errorf("error loading tls config: %w", err)
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/azure/metadata.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/azure/metadata.go
index b6241feab8..d64576e1ef 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/azure/metadata.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/azure/metadata.go
@@ -37,15 +37,21 @@ func NewProvider() Provider {
}
}
+type ComputeTagsListMetadata struct {
+ Name string `json:"name"`
+ Value string `json:"value"`
+}
+
// ComputeMetadata is the Azure IMDS compute metadata response format
type ComputeMetadata struct {
- Location string `json:"location"`
- Name string `json:"name"`
- VMID string `json:"vmID"`
- VMSize string `json:"vmSize"`
- SubscriptionID string `json:"subscriptionID"`
- ResourceGroupName string `json:"resourceGroupName"`
- VMScaleSetName string `json:"vmScaleSetName"`
+ Location string `json:"location"`
+ Name string `json:"name"`
+ VMID string `json:"vmID"`
+ VMSize string `json:"vmSize"`
+ SubscriptionID string `json:"subscriptionID"`
+ ResourceGroupName string `json:"resourceGroupName"`
+ VMScaleSetName string `json:"vmScaleSetName"`
+ TagsList []ComputeTagsListMetadata `json:"tagsList"`
}
// Metadata queries a given endpoint and parses the output to the Azure IMDS format
@@ -74,12 +80,13 @@ func (p *azureProviderImpl) Metadata(ctx context.Context) (*ComputeMetadata, err
resp, err := p.client.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to query Azure IMDS: %w", err)
- } else if resp.StatusCode != 200 {
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
//lint:ignore ST1005 Azure is a capitalized proper noun here
return nil, fmt.Errorf("Azure IMDS replied with status code: %s", resp.Status)
}
- defer resp.Body.Close()
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read Azure IMDS reply: %w", err)
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk/common.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk/common.go
index 7792b73381..a98d064b65 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk/common.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk/common.go
@@ -23,11 +23,14 @@ const (
DefaultSeverityTextLabel = "otel.log.severity.text"
DefaultSeverityNumberLabel = "otel.log.severity.number"
HECTokenHeader = "Splunk"
- HecTokenLabel = "com.splunk.hec.access_token" // #nosec
+ HTTPSplunkChannelHeader = "X-Splunk-Request-Channel"
+
+ HecTokenLabel = "com.splunk.hec.access_token" // #nosec
// HecEventMetricType is the type of HEC event. Set to metric, as per https://docs.splunk.com/Documentation/Splunk/8.0.3/Metrics/GetMetricsInOther.
HecEventMetricType = "metric"
DefaultRawPath = "/services/collector/raw"
DefaultHealthPath = "/services/collector/health"
+ DefaultAckPath = "/services/collector/ack"
)
// AccessTokenPassthroughConfig configures passing through access tokens.
@@ -48,12 +51,16 @@ type Event struct {
}
// IsMetric returns true if the Splunk event is a metric.
-func (e Event) IsMetric() bool {
+func (e *Event) IsMetric() bool {
return e.Event == HecEventMetricType || (e.Event == nil && len(e.GetMetricValues()) > 0)
}
// GetMetricValues extracts metric key value pairs from a Splunk HEC metric.
-func (e Event) GetMetricValues() map[string]any {
+func (e *Event) GetMetricValues() map[string]any {
+ if v, ok := e.Fields["metric_name"]; ok {
+ return map[string]any{v.(string): e.Fields["_value"]}
+ }
+
values := map[string]any{}
for k, v := range e.Fields {
if strings.HasPrefix(k, "metric_name:") {
@@ -112,3 +119,7 @@ type HecToOtelAttrs struct {
// Host indicates the mapping of the host field to a specific unified model attribute.
Host string `mapstructure:"host"`
}
+
+type AckRequest struct {
+ Acks []uint64 `json:"acks"`
+}
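
`GetMetricValues` now recognizes the single-metric HEC layout (a `metric_name` field paired with `_value`) before falling back to the multi-metric `metric_name:<name>` layout. A sketch of the two extraction paths over plain maps; field names follow the diff, the sample values are made up, and the fallback here uses `strings.TrimPrefix` rather than whatever slicing the vendored code performs:

```go
// Sketch of the two Splunk HEC metric field layouts handled by GetMetricValues.
package main

import (
	"fmt"
	"strings"
)

func metricValues(fields map[string]any) map[string]any {
	// Single-metric form: {"metric_name": "cpu.util", "_value": 0.42}
	if name, ok := fields["metric_name"]; ok {
		return map[string]any{name.(string): fields["_value"]}
	}
	// Multi-metric form: {"metric_name:cpu.util": 0.42, "metric_name:mem.used": 123}
	values := map[string]any{}
	for k, v := range fields {
		if strings.HasPrefix(k, "metric_name:") {
			values[strings.TrimPrefix(k, "metric_name:")] = v
		}
	}
	return values
}

func main() {
	fmt.Println(metricValues(map[string]any{"metric_name": "cpu.util", "_value": 0.42}))
	fmt.Println(metricValues(map[string]any{"metric_name:mem.used": 123}))
}
```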
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md
index 2c25f74600..b57b2de5cd 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md
@@ -4,12 +4,12 @@
| ------------- |-----------|
| Stability | [alpha]: traces, metrics, logs |
| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Apkg%2Fottl%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Apkg%2Fottl) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Apkg%2Fottl%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Apkg%2Fottl) |
-| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@kentquirk](https://www.github.com/kentquirk), [@bogdandrutu](https://www.github.com/bogdandrutu), [@evan-bradley](https://www.github.com/evan-bradley) |
+| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@kentquirk](https://www.github.com/kentquirk), [@bogdandrutu](https://www.github.com/bogdandrutu), [@evan-bradley](https://www.github.com/evan-bradley) \| Seeking more code owners! |
[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha
-The OpenTelemetry Transformation Language is a language for transforming open telemetry data based on the [OpenTelemetry Collector Processing Exploration](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/processing.md).
+The OpenTelemetry Transformation Language is a language for transforming open telemetry data based on the [OpenTelemetry Collector Processing Exploration](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/rfcs/processing.md).
This package reads in OTTL statements and converts them to invokable functions/booleans based on the OTTL's grammar.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md
new file mode 100644
index 0000000000..4df2ae3616
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md
@@ -0,0 +1,27 @@
+# Instrumentation Scope Context
+
+The Instrumentation Scope Context is a Context implementation for [pdata Instrumentation Scope](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pcommon/generated_instrumentationscope.go), the Collector's internal representation for OTLP instrumentation scope data. This Context should be used when interacting only with OTLP instrumentation scope.
+
+## Paths
+In general, the Instrumentation Scope Context supports accessing pdata using the field names from the instrumentation section in the [common proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/common/v1/common.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`.
+
+The following paths are supported.
+
+| path | field accessed | type |
+|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------|
+| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map |
+| cache\[""\] | the value of an item in cache. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource | resource of the instrumentation scope being processed | pcommon.Resource |
+| resource.attributes | resource attributes of the instrumentation scope being processed | pcommon.Map |
+| resource.attributes\[""\] | the value of the resource attribute of the instrumentation scope being processed. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource.dropped_attributes_count | number of dropped attributes of the resource of the instrumentation scope being processed | int64 |
+| name | name of the instrumentation scope of the scope being processed | string |
+| version | version of the instrumentation scope of the scope being processed | string |
+| dropped_attributes_count | number of dropped attributes of the instrumentation scope of the scope being processed | int64 |
+| attributes | instrumentation scope attributes of the scope being processed | pcommon.Map |
+| attributes\[""\] | the value of the instrumentation scope attribute of the scope being processed. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+
+
+## Enums
+
+The Instrumentation Scope Context does not define any Enums at this time.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go
new file mode 100644
index 0000000000..f7a9d92ee8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go
@@ -0,0 +1,145 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlscope // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal"
+)
+
+var _ internal.ResourceContext = TransformContext{}
+var _ internal.InstrumentationScopeContext = TransformContext{}
+
+type TransformContext struct {
+ instrumentationScope pcommon.InstrumentationScope
+ resource pcommon.Resource
+ cache pcommon.Map
+}
+
+type Option func(*ottl.Parser[TransformContext])
+
+func NewTransformContext(instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext {
+ return TransformContext{
+ instrumentationScope: instrumentationScope,
+ resource: resource,
+ cache: pcommon.NewMap(),
+ }
+}
+
+func (tCtx TransformContext) GetInstrumentationScope() pcommon.InstrumentationScope {
+ return tCtx.instrumentationScope
+}
+
+func (tCtx TransformContext) GetResource() pcommon.Resource {
+ return tCtx.resource
+}
+
+func (tCtx TransformContext) getCache() pcommon.Map {
+ return tCtx.cache
+}
+
+func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) {
+ pep := pathExpressionParser{telemetrySettings}
+ p, err := ottl.NewParser[TransformContext](
+ functions,
+ pep.parsePath,
+ telemetrySettings,
+ ottl.WithEnumParser[TransformContext](parseEnum),
+ )
+ if err != nil {
+ return ottl.Parser[TransformContext]{}, err
+ }
+ for _, opt := range options {
+ opt(&p)
+ }
+ return p, nil
+}
+
+type StatementSequenceOption func(*ottl.StatementSequence[TransformContext])
+
+func WithStatementSequenceErrorMode(errorMode ottl.ErrorMode) StatementSequenceOption {
+ return func(s *ottl.StatementSequence[TransformContext]) {
+ ottl.WithStatementSequenceErrorMode[TransformContext](errorMode)(s)
+ }
+}
+
+func NewStatementSequence(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementSequenceOption) ottl.StatementSequence[TransformContext] {
+ s := ottl.NewStatementSequence(statements, telemetrySettings)
+ for _, op := range options {
+ op(&s)
+ }
+ return s
+}
+
+type ConditionSequenceOption func(*ottl.ConditionSequence[TransformContext])
+
+func WithConditionSequenceErrorMode(errorMode ottl.ErrorMode) ConditionSequenceOption {
+ return func(c *ottl.ConditionSequence[TransformContext]) {
+ ottl.WithConditionSequenceErrorMode[TransformContext](errorMode)(c)
+ }
+}
+
+func NewConditionSequence(conditions []*ottl.Condition[TransformContext], telemetrySettings component.TelemetrySettings, options ...ConditionSequenceOption) ottl.ConditionSequence[TransformContext] {
+ c := ottl.NewConditionSequence(conditions, telemetrySettings)
+ for _, op := range options {
+ op(&c)
+ }
+ return c
+}
+
+func parseEnum(_ *ottl.EnumSymbol) (*ottl.Enum, error) {
+ return nil, fmt.Errorf("instrumentation scope context does not provide Enum support")
+}
+
+type pathExpressionParser struct {
+ telemetrySettings component.TelemetrySettings
+}
+
+func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ottl.GetSetter[TransformContext], error) {
+ if path == nil {
+ return nil, fmt.Errorf("path cannot be nil")
+ }
+ switch path.Name() {
+ case "cache":
+ if path.Keys() == nil {
+ return accessCache(), nil
+ }
+ return accessCacheKey(path.Keys()), nil
+ case "resource":
+ return internal.ResourcePathGetSetter[TransformContext](path.Next())
+ default:
+ return internal.ScopePathGetSetter[TransformContext](path)
+ }
+}
+
+func accessCache() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
+ return tCtx.getCache(), nil
+ },
+ Setter: func(_ context.Context, tCtx TransformContext, val any) error {
+ if m, ok := val.(pcommon.Map); ok {
+ m.CopyTo(tCtx.getCache())
+ }
+ return nil
+ },
+ }
+}
+
+func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+ return internal.GetMapValue[TransformContext](ctx, tCtx, tCtx.getCache(), key)
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+ return internal.SetMapValue[TransformContext](ctx, tCtx, tCtx.getCache(), key, val)
+ },
+ }
+}
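
The new context slots into the generic OTTL machinery like the existing span, metric and log contexts. A hedged sketch of parsing and executing one statement against a scope; it assumes `ottlfuncs.StandardFuncs` supplies the `set` editor, and it is not how any particular component wires things up:

```go
// Hedged sketch: run an OTTL statement in the new instrumentation scope context.
package main

import (
	"context"

	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
)

func main() {
	set := componenttest.NewNopTelemetrySettings()

	parser, err := ottlscope.NewParser(ottlfuncs.StandardFuncs[ottlscope.TransformContext](), set)
	if err != nil {
		panic(err)
	}
	statements, err := parser.ParseStatements([]string{`set(attributes["checked"], true)`})
	if err != nil {
		panic(err)
	}
	seq := ottlscope.NewStatementSequence(statements, set)

	scope := pcommon.NewInstrumentationScope()
	if err := seq.Execute(context.Background(), ottlscope.NewTransformContext(scope, pcommon.NewResource())); err != nil {
		panic(err)
	}
	// scope.Attributes() now carries checked=true.
}
```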
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go
index 86782be3c4..98f730a24d 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go
@@ -283,8 +283,8 @@ func (p *Parser[K]) buildArgs(ed editor, argsVal reflect.Value) error {
switch {
case arg.Value.Enum != nil:
name = string(*arg.Value.Enum)
- case arg.Value.FunctionName != nil:
- name = *arg.Value.FunctionName
+ case arg.FunctionName != nil:
+ name = *arg.FunctionName
default:
return fmt.Errorf("invalid function name given")
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go
index ebd8e58ec7..04352a6b7a 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go
@@ -218,8 +218,9 @@ type converter struct {
}
type argument struct {
- Name string `parser:"(@(Lowercase(Uppercase | Lowercase)*) Equal)?"`
- Value value `parser:"@@"`
+ Name string `parser:"(@(Lowercase(Uppercase | Lowercase)*) Equal)?"`
+ Value value `parser:"( @@"`
+ FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*) )"`
}
func (a *argument) checkForCustomError() error {
@@ -236,7 +237,6 @@ type value struct {
String *string `parser:"| @String"`
Bool *boolean `parser:"| @Boolean"`
Enum *enumSymbol `parser:"| @Uppercase (?! Lowercase)"`
- FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*)"`
List *list `parser:"| @@)"`
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml
index e7333a2c5f..b326d06fb1 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml
@@ -5,4 +5,5 @@ status:
stability:
alpha: [ traces, metrics, logs ]
codeowners:
- active: [TylerHelmuth, kentquirk, bogdandrutu, evan-bradley]
\ No newline at end of file
+ active: [TylerHelmuth, kentquirk, bogdandrutu, evan-bradley]
+ seeking_new: true
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md
index 9b8aff6ffd..e9d0c62ff5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md
@@ -380,6 +380,7 @@ Available Converters:
- [Base64Decode](#base64decode)
- [Concat](#concat)
- [ConvertCase](#convertcase)
+- [Day](#day)
- [ExtractPatterns](#extractpatterns)
- [FNV](#fnv)
- [Hour](#hour)
@@ -392,12 +393,15 @@ Available Converters:
- [IsInt](#isint)
- [IsMap](#ismap)
- [IsMatch](#ismatch)
+- [IsList](#islist)
- [IsString](#isstring)
- [Len](#len)
- [Log](#log)
- [Microseconds](#microseconds)
- [Milliseconds](#milliseconds)
+- [Minute](#minute)
- [Minutes](#minutes)
+- [Month](#month)
- [Nanoseconds](#nanoseconds)
- [Now](#now)
- [ParseCSV](#parsecsv)
@@ -420,6 +424,7 @@ Available Converters:
- [UnixNano](#unixnano)
- [UnixSeconds](#unixseconds)
- [UUID](#UUID)
+- [Year](#year)
### Base64Decode
@@ -440,9 +445,9 @@ Examples:
`Concat(values[], delimiter)`
-The `Concat` Converter takes a delimiter and a sequence of values and concatenates their string representation. Unsupported values, such as lists or maps that may substantially increase payload size, are not added to the resulting string.
+The `Concat` Converter takes a sequence of values and a delimiter and concatenates their string representation. Unsupported values, such as lists or maps that may substantially increase payload size, are not added to the resulting string.
-`values` is a list of values passed as arguments. It supports paths, primitive values, and byte slices (such as trace IDs or span IDs).
+`values` is a list of values. It supports paths, primitive values, and byte slices (such as trace IDs or span IDs).
`delimiter` is a string value that is placed between strings during concatenation. If no delimiter is desired, then simply pass an empty string.
@@ -479,6 +484,20 @@ Examples:
- `ConvertCase(metric.name, "snake")`
+### Day
+
+`Day(value)`
+
+The `Day` Converter returns the day component from the specified time using the Go stdlib [`time.Day` function](https://pkg.go.dev/time#Time.Day).
+
+`value` is a `time.Time`. If `value` is another type, an error is returned.
+
+The returned type is `int64`.
+
+Examples:
+
+- `Day(Now())`
+
### Double
The `Double` Converter converts an inputted `value` into a double.
@@ -708,6 +727,22 @@ Examples:
- `IsMatch("string", ".*ring")`
+### IsList
+
+`IsList(value)`
+
+The `IsList` Converter returns true if the given value is a list.
+
+The `value` is either a path expression to a telemetry field to retrieve or a literal.
+
+If `value` is a `list`, `pcommon.ValueTypeSlice`, `pcommon.Slice`, or any other list type, then `true` is returned; otherwise `false` is returned.
+
+Examples:
+
+- `IsList(body)`
+
+- `IsList(attributes["maybe a slice"])`
+
### IsString
`IsString(value)`
@@ -792,6 +827,20 @@ Examples:
- `Milliseconds(Duration("1h"))`
+### Minute
+
+`Minute(value)`
+
+The `Minute` Converter returns the minute component from the specified time using the Go stdlib [`time.Minute` function](https://pkg.go.dev/time#Time.Minute).
+
+`value` is a `time.Time`. If `value` is another type, an error is returned.
+
+The returned type is `int64`.
+
+Examples:
+
+- `Minute(Now())`
+
### Minutes
`Minutes(value)`
@@ -806,6 +855,20 @@ Examples:
- `Minutes(Duration("1h"))`
+### Month
+
+`Month(value)`
+
+The `Month` Converter returns the month component from the specified time using the Go stdlib [`time.Month` function](https://pkg.go.dev/time#Time.Month).
+
+`value` is a `time.Time`. If `value` is another type, an error is returned.
+
+The returned type is `int64`.
+
+Examples:
+
+- `Month(Now())`
+
### Nanoseconds
`Nanoseconds(value)`
@@ -1114,15 +1177,69 @@ Examples:
### Time
-The `Time` Converter takes a string representation of a time and converts it to a Golang `time.Time`.
+`Time(target, format, Optional[location])`
-`time` is a string. `format` is a string.
+The `Time` Converter takes a string representation of a time and converts it to a Golang `time.Time`.
-If either `time` or `format` are nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If the time and format do not follow the parsing rules used by this parser, an error is returned.
+`target` is a string. `format` is a string, `location` is an optional string.
+
+If either `target` or `format` are nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If the `target` and `format` do not follow the parsing rules used by this parser, an error is returned.
+
+`format` denotes a textual representation of the time value formatted according to a ctime-like format string. It follows [standard Go Layout formatting](https://pkg.go.dev/time#pkg-constants) with a few additional substitutes:
+| substitution | description | examples |
+|-----|-----|-----|
+|`%Y` | Year as a zero-padded number | 0001, 0002, ..., 2019, 2020, ..., 9999 |
+|`%y` | Year, last two digits as a zero-padded number | 01, ..., 99 |
+|`%m` | Month as a zero-padded number | 01, 02, ..., 12 |
+|`%o` | Month as a space-padded number | 1, 2, ..., 12 |
+|`%q` | Month as an unpadded number | 1,2,...,12 |
+|`%b`, `%h` | Abbreviated month name | Jan, Feb, ... |
+|`%B` | Full month name | January, February, ... |
+|`%d` | Day of the month as a zero-padded number | 01, 02, ..., 31 |
+|`%e` | Day of the month as a space-padded number | 1, 2, ..., 31 |
+|`%g` | Day of the month as an unpadded number | 1, 2, ..., 31 |
+|`%a` | Abbreviated weekday name | Sun, Mon, ... |
+|`%A` | Full weekday name | Sunday, Monday, ... |
+|`%H` | Hour (24-hour clock) as a zero-padded number | 00, ..., 24 |
+|`%I` | Hour (12-hour clock) as a zero-padded number | 00, ..., 12 |
+|`%l` | Hour 12-hour clock | 0, ..., 24 |
+|`%p` | Locale’s equivalent of either AM or PM | AM, PM |
+|`%P` | Locale’s equivalent of either am or pm | am, pm |
+|`%M` | Minute as a zero-padded number | 00, 01, ..., 59 |
+|`%S` | Second as a zero-padded number | 00, 01, ..., 59 |
+|`%L` | Millisecond as a zero-padded number | 000, 001, ..., 999 |
+|`%f` | Microsecond as a zero-padded number | 000000, ..., 999999 |
+|`%s` | Nanosecond as a zero-padded number | 00000000, ..., 99999999 |
+|`%z` | UTC offset in the form ±HHMM[SS[.ffffff]] or empty | +0000, -0400 |
+|`%Z` | Timezone name or abbreviation or empty | UTC, EST, CST |
+|`%D`, `%x` | Short MM/DD/YYYY date, equivalent to %m/%d/%y | 01/21/2031 |
+|`%F` | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2031-01-21 |
+|`%T`,`%X` | ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S | 02:55:02 |
+|`%r` | 12-hour clock time | 02:55:02 pm |
+|`%R` | 24-hour HH:MM time, equivalent to %H:%M | 13:55 |
+|`%n` | New-line character ('\n') | |
+|`%t` | Horizontal-tab character ('\t') | |
+|`%%` | A % sign | |
+|`%c` | Date and time representation | Mon Jan 02 15:04:05 2006 |
+
+`location` specifies a default time zone canonical ID to be used for date parsing in case it is not part of `format`.
+
+When loading `location`, this function will look for the IANA Time Zone database in the following locations in order:
+- a directory or uncompressed zip file named by the ZONEINFO environment variable
+- on a Unix system, the system standard installation location
+- $GOROOT/lib/time/zoneinfo.zip
+- the `time/tzdata` package, if it was imported.
+
+When building a Collector binary, importing `time/tzdata` in any Go source file will bundle the database into the binary, which guarantees the lookups will work regardless of the setup on the host. Note this will add roughly 500kB to the binary size.
Examples:
- `Time("02/04/2023", "%m/%d/%Y")`
+- `Time("Feb 15, 2023", "%b %d, %Y")`
+- `Time("2023-05-26 12:34:56 HST", "%Y-%m-%d %H:%M:%S %Z")`
+- `Time("1986-10-01T00:17:33 MST", "%Y-%m-%dT%H:%M:%S %Z")`
+- `Time("2012-11-01T22:08:41+0000 EST", "%Y-%m-%dT%H:%M:%S%z %Z")`
+- `Time("2023-05-26 12:34:56", "%Y-%m-%d %H:%M:%S", "America/New_York")`
### TraceID
@@ -1227,6 +1344,20 @@ Examples:
The `UUID` function generates a v4 uuid string.
+### Year
+
+`Year(value)`
+
+The `Year` Converter returns the year component from the specified time using the Go stdlib [`time.Year` function](https://pkg.go.dev/time#Time.Year).
+
+`value` is a `time.Time`. If `value` is another type, an error is returned.
+
+The returned type is `int64`.
+
+Examples:
+
+- `Year(Now())`
+
## Function syntax
Functions should be named and formatted according to the following standards.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go
new file mode 100644
index 0000000000..ecdd0ceb2d
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type DayArguments[K any] struct {
+ Time ottl.TimeGetter[K]
+}
+
+func NewDayFactory[K any]() ottl.Factory[K] {
+ return ottl.NewFactory("Day", &DayArguments[K]{}, createDayFunction[K])
+}
+
+func createDayFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) {
+ args, ok := oArgs.(*DayArguments[K])
+
+ if !ok {
+ return nil, fmt.Errorf("DayFactory args must be of type *DayArguments[K]")
+ }
+
+ return Day(args.Time)
+}
+
+func Day[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (any, error) {
+ t, err := time.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ return int64(t.Day()), nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go
new file mode 100644
index 0000000000..137ccb4707
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/plog"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+ "go.opentelemetry.io/collector/pdata/ptrace"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type IsListArguments[K any] struct {
+ Target ottl.Getter[K]
+}
+
+func NewIsListFactory[K any]() ottl.Factory[K] {
+ return ottl.NewFactory("IsList", &IsListArguments[K]{}, createIsListFunction[K])
+}
+
+func createIsListFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) {
+ args, ok := oArgs.(*IsListArguments[K])
+
+ if !ok {
+ return nil, fmt.Errorf("IsListFactory args must be of type *IsListArguments[K]")
+ }
+
+ return isList(args.Target), nil
+}
+
+func isList[K any](target ottl.Getter[K]) ottl.ExprFunc[K] {
+ return func(ctx context.Context, tCtx K) (any, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return false, err
+ }
+
+ switch valType := val.(type) {
+ case pcommon.Value:
+ return valType.Type() == pcommon.ValueTypeSlice, nil
+
+ case pcommon.Slice, plog.LogRecordSlice, plog.ResourceLogsSlice, plog.ScopeLogsSlice, pmetric.ExemplarSlice, pmetric.ExponentialHistogramDataPointSlice, pmetric.HistogramDataPointSlice, pmetric.MetricSlice, pmetric.NumberDataPointSlice, pmetric.ResourceMetricsSlice, pmetric.ScopeMetricsSlice, pmetric.SummaryDataPointSlice, pmetric.SummaryDataPointValueAtQuantileSlice, ptrace.ResourceSpansSlice, ptrace.ScopeSpansSlice, ptrace.SpanEventSlice, ptrace.SpanLinkSlice, ptrace.SpanSlice, []string, []bool, []int64, []float64, [][]byte, []any:
+ return true, nil
+ }
+
+ return false, nil
+ }
+}
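
`isList` answers true either when a `pcommon.Value` holds a slice or when the value itself is one of the known slice types. The same check, applied directly to pdata values in a stand-alone sketch:

```go
// Sketch of the type check behind IsList, applied to pdata values and plain slices.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func looksLikeList(val any) bool {
	switch v := val.(type) {
	case pcommon.Value:
		return v.Type() == pcommon.ValueTypeSlice
	case pcommon.Slice, []any, []string, []int64, []float64, []bool, [][]byte:
		return true
	}
	return false
}

func main() {
	slice := pcommon.NewValueSlice()
	slice.Slice().AppendEmpty().SetStr("a")

	fmt.Println(looksLikeList(slice))                   // true
	fmt.Println(looksLikeList(pcommon.NewValueStr(""))) // false
	fmt.Println(looksLikeList([]int64{1, 2, 3}))        // true
}
```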
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go
new file mode 100644
index 0000000000..f2ce05a179
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type MinuteArguments[K any] struct {
+ Time ottl.TimeGetter[K]
+}
+
+func NewMinuteFactory[K any]() ottl.Factory[K] {
+ return ottl.NewFactory("Minute", &MinuteArguments[K]{}, createMinuteFunction[K])
+}
+
+func createMinuteFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) {
+ args, ok := oArgs.(*MinuteArguments[K])
+
+ if !ok {
+ return nil, fmt.Errorf("MinuteFactory args must be of type *MinuteArguments[K]")
+ }
+
+ return Minute(args.Time)
+}
+
+func Minute[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (any, error) {
+ t, err := time.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ return int64(t.Minute()), nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go
new file mode 100644
index 0000000000..a4ab013d11
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type MonthArguments[K any] struct {
+ Time ottl.TimeGetter[K]
+}
+
+func NewMonthFactory[K any]() ottl.Factory[K] {
+ return ottl.NewFactory("Month", &MonthArguments[K]{}, createMonthFunction[K])
+}
+
+func createMonthFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) {
+ args, ok := oArgs.(*MonthArguments[K])
+
+ if !ok {
+ return nil, fmt.Errorf("MonthFactory args must be of type *MonthArguments[K]")
+ }
+
+ return Month(args.Time)
+}
+
+func Month[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (any, error) {
+ t, err := time.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ return int64(t.Month()), nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_time.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_time.go
index a6ef708b09..b6d793cc3e 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_time.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_time.go
@@ -12,8 +12,9 @@ import (
)
type TimeArguments[K any] struct {
- Time ottl.StringGetter[K]
- Format string
+ Time ottl.StringGetter[K]
+ Format string
+ Location ottl.Optional[string]
}
func NewTimeFactory[K any]() ottl.Factory[K] {
@@ -26,14 +27,20 @@ func createTimeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ot
return nil, fmt.Errorf("TimeFactory args must be of type *TimeArguments[K]")
}
- return Time(args.Time, args.Format)
+ return Time(args.Time, args.Format, args.Location)
}
-func Time[K any](inputTime ottl.StringGetter[K], format string) (ottl.ExprFunc[K], error) {
+func Time[K any](inputTime ottl.StringGetter[K], format string, location ottl.Optional[string]) (ottl.ExprFunc[K], error) {
if format == "" {
return nil, fmt.Errorf("format cannot be nil")
}
- loc, err := timeutils.GetLocation(nil, &format)
+ var defaultLocation *string
+ if !location.IsEmpty() {
+ l := location.Get()
+ defaultLocation = &l
+ }
+
+ loc, err := timeutils.GetLocation(defaultLocation, &format)
if err != nil {
return nil, err
}
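
For context, a minimal sketch (not part of the diff) of how the new optional Location argument might be exercised directly. The constString getter and the chosen timestamp/zone are illustrative, and ottl.NewTestingOptional is assumed to be available as in the ottl package's tests; the printed value is whatever the Time converter produces for the parsed timestamp.

package main

import (
    "context"
    "fmt"

    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
)

// constString is an illustrative ottl.StringGetter returning a fixed value.
type constString struct{ s string }

func (g constString) Get(context.Context, any) (string, error) { return g.s, nil }

func main() {
    // Parse a wall-clock timestamp, interpreting it in an explicit time zone
    // because the format carries no zone directive of its own.
    fn, err := ottlfuncs.Time[any](
        constString{s: "2024-05-06 07:08:09"},
        "%Y-%m-%d %H:%M:%S",
        ottl.NewTestingOptional("America/New_York"),
    )
    if err != nil {
        panic(err)
    }
    v, _ := fn(context.Background(), nil)
    fmt.Println(v) // e.g. 2024-05-06 07:08:09 -0400 EDT
}
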
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go
new file mode 100644
index 0000000000..b64b35bcd8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type YearArguments[K any] struct {
+ Time ottl.TimeGetter[K]
+}
+
+func NewYearFactory[K any]() ottl.Factory[K] {
+ return ottl.NewFactory("Year", &YearArguments[K]{}, createYearFunction[K])
+}
+
+func createYearFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) {
+ args, ok := oArgs.(*YearArguments[K])
+
+ if !ok {
+ return nil, fmt.Errorf("YearFactory args must be of type *YearArguments[K]")
+ }
+
+ return Year(args.Time)
+}
+
+func Year[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (any, error) {
+ t, err := time.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ return int64(t.Year()), nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go
index 25a596c3c7..c67f68952c 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go
@@ -38,6 +38,7 @@ func converters[K any]() []ottl.Factory[K] {
NewBase64DecodeFactory[K](),
NewConcatFactory[K](),
NewConvertCaseFactory[K](),
+ NewDayFactory[K](),
NewDoubleFactory[K](),
NewDurationFactory[K](),
NewExtractPatternsFactory[K](),
@@ -47,6 +48,7 @@ func converters[K any]() []ottl.Factory[K] {
NewIntFactory[K](),
NewIsBoolFactory[K](),
NewIsDoubleFactory[K](),
+ NewIsListFactory[K](),
NewIsIntFactory[K](),
NewIsMapFactory[K](),
NewIsMatchFactory[K](),
@@ -55,7 +57,9 @@ func converters[K any]() []ottl.Factory[K] {
NewLogFactory[K](),
NewMicrosecondsFactory[K](),
NewMillisecondsFactory[K](),
+ NewMinuteFactory[K](),
NewMinutesFactory[K](),
+ NewMonthFactory[K](),
NewNanosecondsFactory[K](),
NewNowFactory[K](),
NewParseCSVFactory[K](),
@@ -78,5 +82,6 @@ func converters[K any]() []ottl.Factory[K] {
NewUnixNanoFactory[K](),
NewUnixSecondsFactory[K](),
NewUUIDFactory[K](),
+ NewYearFactory[K](),
}
}
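
With the registrations above, the new Year, Month, and Minute converters become available to OTTL statements alongside the existing duration helpers. A small sketch (not part of the diff) calls the exported constructors directly; constTime is a hypothetical ottl.TimeGetter used only for illustration.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
)

// constTime is an illustrative ottl.TimeGetter returning a fixed time.
type constTime struct{ t time.Time }

func (g constTime) Get(context.Context, any) (time.Time, error) { return g.t, nil }

func main() {
    ts := constTime{t: time.Date(2024, time.May, 6, 7, 8, 9, 0, time.UTC)}

    year, _ := ottlfuncs.Year[any](ts)
    month, _ := ottlfuncs.Month[any](ts)
    minute, _ := ottlfuncs.Minute[any](ts)

    y, _ := year(context.Background(), nil)
    m, _ := month(context.Background(), nil)
    min, _ := minute(context.Background(), nil)
    fmt.Println(y, m, min) // 2024 5 8 (all int64)
}
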
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/LICENSE
similarity index 100%
rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/LICENSE
rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/LICENSE
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/Makefile
similarity index 100%
rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/datadog/Makefile
rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/Makefile
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/README.md
new file mode 100644
index 0000000000..eb3a4f8cc1
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/README.md
@@ -0,0 +1,23 @@
+# pkg/sampling
+
+## Overview
+
+This package contains utilities for parsing and interpreting the W3C
+[TraceState](https://www.w3.org/TR/trace-context/#tracestate-header)
+and all sampling-relevant fields specified by OpenTelemetry that may
+be found in the OpenTelemetry section of the W3C TraceState.
+
+This package implements the draft specification in [OTEP
+235](https://github.com/open-telemetry/oteps/pull/235), which
+specifies two fields used by the OpenTelemetry consistent probability
+sampling scheme.
+
+These are:
+
+- `th`: the Threshold used to determine whether a TraceID is sampled
+- `rv`: an explicit randomness value, which overrides randomness in the TraceID
+
+[OTEP 235](https://github.com/open-telemetry/oteps/pull/235) contains
+details on how to interpret these fields. They are not meant to be
+human-readable, with a few exceptions. The tracestate entry `ot=th:0`
+indicates 100% sampling.
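
For orientation, a sketch (not part of the vendored README) that parses a header carrying both fields with this package; the header value itself is illustrative only.

package main

import (
    "fmt"

    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)

func main() {
    // "th:8" rejects half of the 2^56 randomness values (50% sampling);
    // "rv" supplies explicit randomness instead of relying on the TraceID.
    w3c, err := sampling.NewW3CTraceState("ot=th:8;rv:1b2c3d4e5f6a7b")
    if err != nil {
        panic(err)
    }
    ot := w3c.OTelValue()
    if th, ok := ot.TValueThreshold(); ok {
        fmt.Println(th.Probability()) // 0.5
    }
    if rnd, ok := ot.RValueRandomness(); ok {
        fmt.Println(rnd.RValue()) // 1b2c3d4e5f6a7b
    }
}
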
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/common.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/common.go
new file mode 100644
index 0000000000..ad94bac763
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/common.go
@@ -0,0 +1,125 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
+
+import (
+ "errors"
+ "io"
+ "strings"
+
+ "go.uber.org/multierr"
+)
+
+// KV represents a key-value parsed from a section of the TraceState.
+type KV struct {
+ Key string
+ Value string
+}
+
+var (
+ // ErrTraceStateSize is returned when a TraceState is over its
+ // size limit, as specified by W3C.
+ ErrTraceStateSize = errors.New("invalid tracestate size")
+)
+
+// keyValueScanner defines distinct scanner behaviors for lists of
+// key-values.
+type keyValueScanner struct {
+ // maxItems is 32 or -1
+ maxItems int
+ // trim is set if OWS (optional whitespace) should be removed
+ trim bool
+ // separator is , or ;
+ separator byte
+ // equality is = or :
+ equality byte
+}
+
+// commonTraceState is embedded in both W3C and OTel trace states.
+type commonTraceState struct {
+ kvs []KV
+}
+
+// ExtraValues returns additional values that are carried in this
+// tracestate object (W3C or OpenTelemetry).
+func (cts commonTraceState) ExtraValues() []KV {
+ return cts.kvs
+}
+
+// trimOws removes optional whitespace on both ends of a string.
+// This uses the strict definition for optional whitespace given
+// in https://www.w3.org/TR/trace-context/#tracestate-header-field-values
+func trimOws(input string) string {
+ return strings.Trim(input, " \t")
+}
+
+// scanKeyValues is common code to scan either W3C or OTel tracestate
+// entries, as parameterized in the keyValueScanner struct.
+func (s keyValueScanner) scanKeyValues(input string, f func(key, value string) error) error {
+ var rval error
+ items := 0
+ for input != "" {
+ items++
+ if s.maxItems > 0 && items >= s.maxItems {
+ // W3C specifies max 32 entries, tested here
+ // instead of via the regexp.
+ return ErrTraceStateSize
+ }
+
+ sep := strings.IndexByte(input, s.separator)
+
+ var member string
+ if sep < 0 {
+ member = input
+ input = ""
+ } else {
+ member = input[:sep]
+ input = input[sep+1:]
+ }
+
+ if s.trim {
+ // Trim only required for W3C; OTel does not
+ // specify whitespace for its value encoding.
+ member = trimOws(member)
+ }
+
+ if member == "" {
+ // W3C allows empty list members.
+ continue
+ }
+
+ eq := strings.IndexByte(member, s.equality)
+ if eq < 0 {
+ // We expect to find the `s.equality`
+ // character in this string because we have
+ // already validated the whole input syntax
+ // before calling this parser. I.e., this can
+ // never happen, and if it did, the result
+ // would be to skip malformed entries.
+ continue
+ }
+ if err := f(member[:eq], member[eq+1:]); err != nil {
+ rval = multierr.Append(rval, err)
+ }
+ }
+ return rval
+}
+
+// serializer assists with checking and combining errors from
+// (io.StringWriter).WriteString().
+type serializer struct {
+ writer io.StringWriter
+ err error
+}
+
+// write handles errors from io.StringWriter.
+func (ser *serializer) write(str string) {
+ _, err := ser.writer.WriteString(str)
+ ser.check(err)
+}
+
+// check handles errors (e.g., from another serializer).
+func (ser *serializer) check(err error) {
+ ser.err = multierr.Append(ser.err, err)
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/doc.go
new file mode 100644
index 0000000000..0c5b1052bb
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/doc.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// # TraceState representation
+//
+// A [W3CTraceState] object parses and stores the OpenTelemetry
+// tracestate field and any other fields that are present in the
+// W3C tracestate header, part of the [W3C tracecontext specification].
+//
+// An [OpenTelemetryTraceState] object parses and stores fields of
+// the OpenTelemetry-specific tracestate field, including those recognized
+// for probability sampling and any other fields that are present. The
+// syntax of the OpenTelemetry field is specified in [Tracestate handling].
+//
+// The probability sampling-specific fields used here are specified in
+// [OTEP 235]. The principal named fields are:
+//
+// - T-value: The sampling rejection threshold, a 56-bit number encoded in
+// hexadecimal that expresses how many traces (out of 2^56) are rejected by sampling.
+// - R-value: The sampling randomness value, which can be implicit in a
+// TraceID or explicitly encoded as an R-value.
+//
+// # Low-level types
+//
+// The three key data types implemented in this package represent sampling
+// decisions.
+//
+// - [Threshold]: Represents an exact sampling probability.
+// - [Randomness]: Randomness used for sampling decisions.
+// - [Threshold.Probability]: a float64 in the range [MinSamplingProbability, 1.0].
+//
+// # Example use-case
+//
+// To configure a consistent tail sampler in an OpenTelemetry
+// Collector using a fixed probability for all traces in an
+// "equalizing" arrangement, where the effect of sampling is
+// conditioned on how much sampling has already taken place, use the
+// following pseudocode.
+//
+// func Setup() {
+// // Get a fixed probability value from the configuration, in
+// // the range (0, 1].
+// probability := *FLAG_probability
+//
+// // Calculate the sampling threshold from probability using 3
+// // hex digits of precision.
+// fixedThreshold, err = ProbabilityToThresholdWithPrecision(probability, 3)
+// if err != nil {
+// // error case: Probability is not valid.
+// }
+// }
+//
+// func MakeDecision(tracestate string, tid TraceID) bool {
+// // Parse the incoming tracestate
+// ts, err := NewW3CTraceState(tracestate)
+// if err != nil {
+// // error case: Tracestate is ill-formed.
+// }
+// // For an absolute probability sample, we check the incoming
+// // tracestate to see whether it was already sampled enough.
+// if threshold, hasThreshold := ts.OTelValue().TValueThreshold(); hasThreshold {
+// // If the incoming tracestate was already sampled at
+// // least as much as our threshold implies, then its
+// // (rejection) threshold is higher. If so, then no
+// // further sampling is called for.
+// if ThresholdGreater(threshold, fixedThreshold) {
+// // Do not update.
+// return true
+// }
+// // The error below is ignored because we've tested
+// // the equivalent condition above. This lowers the sampling
+// // probability expressed in the tracestate T-value.
+// _ = ts.OTelValue().UpdateThresholdWithSampling(fixedThreshold)
+// }
+// var rnd Randomness
+// // If the R-value is present, use it. If not, rely on TraceID
+// // randomness. Note that OTLP v1.1.0 introduces a new Span flag
+// // to convey trace randomness correctly, and if the context has
+// neither the randomness bit set nor the R-value set, we need a
+// // fallback, which can be to synthesize an R-value or to assume
+// // the TraceID has sufficient randomness. This detail is left
+// // out of scope.
+// if rv, hasRand := ts.OTelValue().RValueRandomness(); hasRand {
+// rnd = rv
+// } else {
+// rnd = TraceIDToRandomness(tid)
+// }
+// return fixedThreshold.ShouldSample(rnd)
+// }
+//
+// [W3C tracecontext specification]: https://www.w3.org/TR/trace-context/#tracestate-header
+// [Tracestate handling]: https://opentelemetry.io/docs/specs/otel/trace/tracestate-handling/
+
+package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/metadata.yaml
new file mode 100644
index 0000000000..8fe70cf1d9
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/metadata.yaml
@@ -0,0 +1,4 @@
+status:
+ class: pkg
+ codeowners:
+ active: [kentquirk, jmacd]
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/oteltracestate.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/oteltracestate.go
new file mode 100644
index 0000000000..fbd2773471
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/oteltracestate.go
@@ -0,0 +1,246 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
+
+import (
+ "errors"
+ "io"
+ "regexp"
+ "strconv"
+)
+
+// OpenTelemetryTraceState represents the `ot` section of the W3C tracestate
+// which is specified generically in https://opentelemetry.io/docs/specs/otel/trace/tracestate-handling/.
+//
+// OpenTelemetry defines two specific values that convey sampling
+// probability: T-Value (with key "th", for threshold) and R-Value
+// (with key "rv", for random value); any other fields are kept as extra values.
+type OpenTelemetryTraceState struct {
+ commonTraceState
+
+ // sampling r and t-values
+ rnd Randomness // r value parsed, as unsigned
+ rvalue string // 14 ASCII hex digits
+ threshold Threshold // t value parsed, as a threshold
+ tvalue string // 1-14 ASCII hex digits
+}
+
+const (
+ // rValueFieldName is the OTel tracestate field for R-value
+ rValueFieldName = "rv"
+ // tValueFieldName is the OTel tracestate field for T-value
+ tValueFieldName = "th"
+
+ // hardMaxOTelLength is the maximum encoded size of an OTel
+ // tracestate value.
+ hardMaxOTelLength = 256
+
+ // chr = ucalpha / lcalpha / DIGIT / "." / "_" / "-"
+ // ucalpha = %x41-5A ; A-Z
+ // lcalpha = %x61-7A ; a-z
+ // key = lcalpha *(lcalpha / DIGIT )
+ // value = *(chr)
+ // list-member = key ":" value
+ // list = list-member *( ";" list-member )
+ otelKeyRegexp = lcAlphaRegexp + lcAlphanumRegexp + `*`
+ otelValueRegexp = `[a-zA-Z0-9._\-]*`
+ otelMemberRegexp = `(?:` + otelKeyRegexp + `:` + otelValueRegexp + `)`
+ otelSemicolonMemberRegexp = `(?:` + `;` + otelMemberRegexp + `)`
+ otelTracestateRegexp = `^` + otelMemberRegexp + otelSemicolonMemberRegexp + `*$`
+)
+
+var (
+ otelTracestateRe = regexp.MustCompile(otelTracestateRegexp)
+
+ otelSyntax = keyValueScanner{
+ maxItems: -1,
+ trim: false,
+ separator: ';',
+ equality: ':',
+ }
+
+ // ErrInconsistentSampling is returned when a sampler update
+ // is illogical, indicating that the tracestate was not
+ // modified. Preferably, Samplers will avoid seeing this
+ // error by using a ThresholdGreater() test, which allows them
+ // to report a clearer error to the user. For example, if
+ // data arrives sampled at 1/100 and an equalizing sampler is
+ // configured for 1/2 sampling, the Sampler may detect the
+ // illogical condition itself using ThresholdGreater and skip
+ // the call to UpdateTValueWithSampling, which will have no
+ // effect and return this error. How a sampler decides to
+ // handle this condition is up to the sampler: for example the
+ // equalizing sampler can decide to pass through a span
+ // indicating 1/100 sampling or it can reject the span.
+ ErrInconsistentSampling = errors.New("cannot raise existing sampling probability")
+)
+
+// NewOpenTelemetryTraceState returns a parsed representation of the
+// OpenTelemetry tracestate section. Errors indicate an invalid
+// tracestate was received.
+func NewOpenTelemetryTraceState(input string) (OpenTelemetryTraceState, error) {
+ otts := OpenTelemetryTraceState{}
+
+ if len(input) > hardMaxOTelLength {
+ return otts, ErrTraceStateSize
+ }
+
+ if !otelTracestateRe.MatchString(input) {
+ return otts, strconv.ErrSyntax
+ }
+
+ err := otelSyntax.scanKeyValues(input, func(key, value string) error {
+ var err error
+ switch key {
+ case rValueFieldName:
+ if otts.rnd, err = RValueToRandomness(value); err == nil {
+ otts.rvalue = value
+ } else {
+ // RValueRandomness() will return false; the error
+ // accumulates and is returned below.
+ otts.rvalue = ""
+ otts.rnd = Randomness{}
+ }
+ case tValueFieldName:
+ if otts.threshold, err = TValueToThreshold(value); err == nil {
+ otts.tvalue = value
+ } else {
+ // TValueThreshold() will return false; the error
+ // accumulates and is returned below.
+ otts.tvalue = ""
+ otts.threshold = AlwaysSampleThreshold
+ }
+ default:
+ otts.kvs = append(otts.kvs, KV{
+ Key: key,
+ Value: value,
+ })
+ }
+ return err
+ })
+
+ return otts, err
+}
+
+// RValue returns the R-value (key: "rv") as a string or empty if
+// there is no R-value set.
+func (otts *OpenTelemetryTraceState) RValue() string {
+ return otts.rvalue
+}
+
+// RValueRandomness returns the randomness object corresponding with
+// RValue() and a boolean indicating whether the R-value is set.
+func (otts *OpenTelemetryTraceState) RValueRandomness() (Randomness, bool) {
+ return otts.rnd, len(otts.rvalue) != 0
+}
+
+// TValue returns the T-value (key: "th") as a string or empty if
+// there is no T-value set.
+func (otts *OpenTelemetryTraceState) TValue() string {
+ return otts.tvalue
+}
+
+// TValueThreshold returns the threshold object corresponding with
+// TValue() and a boolean (equal to len(TValue()) != 0) indicating
+// whether the T-value is valid.
+func (otts *OpenTelemetryTraceState) TValueThreshold() (Threshold, bool) {
+ return otts.threshold, len(otts.tvalue) != 0
+}
+
+// UpdateTValueWithSampling modifies the TValue of this object, which
+// changes its adjusted count. It is not logical to modify a sampling
+// probability in the direction of larger probability. This prevents
+// accidental loss of adjusted count.
+//
+// If the change of TValue leads to inconsistency, an error is returned.
+func (otts *OpenTelemetryTraceState) UpdateTValueWithSampling(sampledThreshold Threshold) error {
+ // Note: there was once a code path here that optimized for
+ // cases where a static threshold is used, in which case the
+ // call to TValue() causes an unnecessary allocation per data
+ // item (w/ a constant result). We have eliminated that
+ // parameter, due to the significant potential for mis-use.
+ // Therefore, this method always recomputes TValue() of the
+ // sampledThreshold (on success). A future method such as
+ // UpdateTValueWithSamplingFixedTValue() could extend this
+ // API to address this allocation, although it is probably
+ // not significant.
+ if len(otts.TValue()) != 0 && ThresholdGreater(otts.threshold, sampledThreshold) {
+ return ErrInconsistentSampling
+ }
+ // Note NeverSampleThreshold is the (exclusive) upper boundary
+ // of valid thresholds, so the test above permits never-
+ // sampled updates, in which case the TValue() here is empty.
+ otts.threshold = sampledThreshold
+ otts.tvalue = sampledThreshold.TValue()
+ return nil
+}
+
+// AdjustedCount returns the adjusted count for this item. If the
+// TValue string is empty, this returns 0, otherwise returns
+// Threshold.AdjustedCount().
+func (otts *OpenTelemetryTraceState) AdjustedCount() float64 {
+ if len(otts.tvalue) == 0 {
+ // Note: this case covers the zero state, where
+ // len(tvalue) == 0 and threshold == AlwaysSampleThreshold.
+ // We return 0 to indicate that no information is available.
+ return 0
+ }
+ return otts.threshold.AdjustedCount()
+}
+
+// ClearTValue is used to unset TValue, for use in cases where it is
+// inconsistent on arrival.
+func (otts *OpenTelemetryTraceState) ClearTValue() {
+ otts.tvalue = ""
+ otts.threshold = Threshold{}
+}
+
+// SetRValue establishes explicit randomness for this TraceState.
+func (otts *OpenTelemetryTraceState) SetRValue(randomness Randomness) {
+ otts.rnd = randomness
+ otts.rvalue = randomness.RValue()
+}
+
+// ClearRValue unsets explicit randomness.
+func (otts *OpenTelemetryTraceState) ClearRValue() {
+ otts.rvalue = ""
+ otts.rnd = Randomness{}
+}
+
+// HasAnyValue returns true if there are any fields in this
+// tracestate, including any extra values.
+func (otts *OpenTelemetryTraceState) HasAnyValue() bool {
+ return len(otts.RValue()) != 0 || len(otts.TValue()) != 0 || len(otts.ExtraValues()) != 0
+}
+
+// Serialize encodes this TraceState object.
+func (otts *OpenTelemetryTraceState) Serialize(w io.StringWriter) error {
+ ser := serializer{writer: w}
+ cnt := 0
+ sep := func() {
+ if cnt != 0 {
+ ser.write(";")
+ }
+ cnt++
+ }
+ if len(otts.RValue()) != 0 {
+ sep()
+ ser.write(rValueFieldName)
+ ser.write(":")
+ ser.write(otts.RValue())
+ }
+ if len(otts.TValue()) != 0 {
+ sep()
+ ser.write(tValueFieldName)
+ ser.write(":")
+ ser.write(otts.TValue())
+ }
+ for _, kv := range otts.ExtraValues() {
+ sep()
+ ser.write(kv.Key)
+ ser.write(":")
+ ser.write(kv.Value)
+ }
+ return ser.err
+}
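
The guard described in the ErrInconsistentSampling and UpdateTValueWithSampling comments above can be summarized in a short sketch (not part of the diff; the probabilities are illustrative).

package main

import (
    "fmt"

    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)

func main() {
    // Incoming data already sampled at 50% ("th:8").
    otts, err := sampling.NewOpenTelemetryTraceState("th:8")
    if err != nil {
        panic(err)
    }

    // An equalizing sampler configured for 25% sampling.
    desired, _ := sampling.ProbabilityToThreshold(0.25)

    // Only lower the probability; raising it would return ErrInconsistentSampling.
    if incoming, ok := otts.TValueThreshold(); !ok || !sampling.ThresholdGreater(incoming, desired) {
        _ = otts.UpdateTValueWithSampling(desired)
    }
    fmt.Println(otts.TValue(), otts.AdjustedCount()) // c 4
}
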
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/probability.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/probability.go
new file mode 100644
index 0000000000..1aeebdd860
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/probability.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
+
+import (
+ "errors"
+ "math"
+)
+
+// ErrProbabilityRange is returned when a value should be in the range [1/MaxAdjustedCount, 1].
+var ErrProbabilityRange = errors.New("sampling probability out of the range [1/MaxAdjustedCount, 1]")
+
+// MinSamplingProbability is the smallest representable probability
+// and is the inverse of MaxAdjustedCount.
+const MinSamplingProbability = 1.0 / float64(MaxAdjustedCount)
+
+// probabilityInRange tests MinSamplingProbability <= prob <= 1.
+func probabilityInRange(prob float64) bool {
+ return prob >= MinSamplingProbability && prob <= 1
+}
+
+// ProbabilityToThreshold converts a probability to a Threshold. It
+// returns an error when the probability is out-of-range.
+func ProbabilityToThreshold(prob float64) (Threshold, error) {
+ return ProbabilityToThresholdWithPrecision(prob, NumHexDigits)
+}
+
+// ProbabilityToThresholdWithPrecision is like ProbabilityToThreshold
+// with support for reduced precision. The `precision` argument determines
+// how many significant hex digits will be used to encode the exact
+// probability.
+func ProbabilityToThresholdWithPrecision(fraction float64, precision int) (Threshold, error) {
+ // Assume full precision at 0.
+ if precision == 0 {
+ precision = NumHexDigits
+ }
+ if !probabilityInRange(fraction) {
+ return AlwaysSampleThreshold, ErrProbabilityRange
+ }
+ // Special case for prob == 1.
+ if fraction == 1 {
+ return AlwaysSampleThreshold, nil
+ }
+
+ // Calculate the amount of precision needed to encode the
+ // threshold with reasonable precision. Here, we count the
+ // number of leading `0` or `f` characters and automatically
+ // add precision to preserve relative error near the extremes.
+ //
+ // Frexp() normalizes both the fraction and one-minus the
+ // fraction, because more digits of precision are needed if
+ // either value is near zero. Frexp returns an exponent <= 0.
+ //
+ // If `exp <= -4`, there will be a leading hex `0` or `f`.
+ // For every multiple of -4, another leading `0` or `f`
+ // appears, so this raises precision accordingly.
+ _, expF := math.Frexp(fraction)
+ _, expR := math.Frexp(1 - fraction)
+ precision = min(NumHexDigits, max(precision+expF/-hexBits, precision+expR/-hexBits))
+
+ // Compute the threshold
+ scaled := uint64(math.Round(fraction * float64(MaxAdjustedCount)))
+ threshold := MaxAdjustedCount - scaled
+
+ // Round to the specified precision, if less than the maximum.
+ if shift := hexBits * (NumHexDigits - precision); shift != 0 {
+ half := uint64(1) << (shift - 1)
+ threshold += half
+ threshold >>= shift
+ threshold <<= shift
+ }
+
+ return Threshold{
+ unsigned: threshold,
+ }, nil
+}
+
+// Probability is the sampling ratio in the range [MinSamplingProbability, 1].
+func (t Threshold) Probability() float64 {
+ return float64(MaxAdjustedCount-t.unsigned) / float64(MaxAdjustedCount)
+}
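
To make the precision handling concrete: encoding one third with three hex digits of precision rounds the rejection threshold to the 14-digit value aab00000000000, so the T-value is "aab" and the effective probability is 1365/4096, within roughly 0.03% of the request. A sketch, not part of the diff:

package main

import (
    "fmt"

    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)

func main() {
    th, err := sampling.ProbabilityToThresholdWithPrecision(1.0/3, 3)
    if err != nil {
        panic(err)
    }
    // The full 14-digit threshold is aab00000000000; trailing zeros are
    // omitted by the T-value encoding.
    fmt.Println(th.TValue())      // aab
    fmt.Println(th.Probability()) // 0.333251953125 (close to, not exactly, 1/3)
}
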
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/randomness.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/randomness.go
new file mode 100644
index 0000000000..8e1cac2f0f
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/randomness.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
+
+import (
+ "encoding/binary"
+ "errors"
+ "strconv"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// numRandomnessValues equals MaxAdjustedCount--this variable has been
+// introduced to improve readability. Recall that MaxAdjustedCount is
+// 2**56, i.e., "100000000000000", which is one greater than the
+// maximum RValue ("ffffffffffffff").
+const numRandomnessValues = MaxAdjustedCount
+
+// ErrRValueSize is returned by RValueToRandomness in case of
+// unexpected size.
+var ErrRValueSize = errors.New("r-value must have 14 hex digits")
+
+// leastHalfTraceIDThresholdMask is the mask to use on the
+// least-significant half of the TraceID, i.e., bytes 8-15.
+// Because this is a 56 bit mask, the result after masking is
+// the unsigned value of bytes 9 through 15.
+//
+// This helps extract 56 bits of randomness from the second half of
+// the TraceID, as specified in https://www.w3.org/TR/trace-context-2/#randomness-of-trace-id
+const leastHalfTraceIDThresholdMask = MaxAdjustedCount - 1
+
+// AllProbabilitiesRandomness is sampled at all probabilities.
+var AllProbabilitiesRandomness = Randomness{unsigned: numRandomnessValues - 1}
+
+// Randomness may be derived from R-value or TraceID.
+//
+// Randomness contains 56 bits of randomness, derived in one of two ways, see:
+// https://www.w3.org/TR/trace-context-2/#randomness-of-trace-id
+type Randomness struct {
+ // unsigned is in the range [0, MaxAdjustedCount-1]
+ unsigned uint64
+}
+
+// TraceIDToRandomness returns randomness from a TraceID (assumes
+// the traceparent random flag was set).
+func TraceIDToRandomness(id pcommon.TraceID) Randomness {
+ // To get the 56 bits we want, take the second half of the trace ID,
+ leastHalf := binary.BigEndian.Uint64(id[8:])
+ return Randomness{
+ // Then apply the mask to get the least-significant 56 bits / 7 bytes.
+ // Equivalently stated: zero the most-significant 8 bits.
+ unsigned: leastHalf & leastHalfTraceIDThresholdMask,
+ }
+}
+
+// RValueToRandomness parses NumHexDigits hex bytes into a Randomness.
+func RValueToRandomness(s string) (Randomness, error) {
+ if len(s) != NumHexDigits {
+ return Randomness{}, ErrRValueSize
+ }
+
+ unsigned, err := strconv.ParseUint(s, hexBase, 64)
+ if err != nil {
+ return Randomness{}, err
+ }
+
+ return Randomness{
+ unsigned: unsigned,
+ }, nil
+}
+
+// RValue formats the r-value encoding.
+func (rnd Randomness) RValue() string {
+ // The important part here is to format a full 14-byte hex
+ // string, including leading zeros. We could accomplish the
+ // same with custom code or with fmt.Sprintf directives, but
+ // here we let strconv.FormatUint fill in leading zeros, as
+ // follows:
+ //
+ // Format (numRandomnessValues+Randomness) as a hex string
+ // Strip the leading hex digit, which is a "1" by design
+ //
+ // For example, a randomness that requires two leading zeros
+ // (all in hexadecimal):
+ //
+ // randomness is 7 bytes: aabbccddeeff
+ // numRandomnessValues is 2^56: 100000000000000
+ // randomness+numRandomnessValues: 100aabbccddeeff
+ // strip the leading "1": 00aabbccddeeff
+ //
+ // If the value is out-of-range, the empty string will be
+ // returned.
+ if rnd.unsigned >= numRandomnessValues {
+ return ""
+ }
+ return strconv.FormatUint(numRandomnessValues+rnd.unsigned, hexBase)[1:]
+}
+
+// Unsigned returns the unsigned representation of the random value.
+// Items of data SHOULD be sampled when:
+//
+// Threshold.Unsigned() <= Randomness.Unsigned().
+func (rnd Randomness) Unsigned() uint64 {
+ return rnd.unsigned
+}
+
+// UnsignedToRandomness constructs a Randomness from 56 random bits
+// given as an unsigned number. If the input is out of range, an invalid value
+// will be returned with an error.
+func UnsignedToRandomness(x uint64) (Randomness, error) {
+ if x >= MaxAdjustedCount {
+ return AllProbabilitiesRandomness, ErrRValueSize
+ }
+ return Randomness{unsigned: x}, nil
+}
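
Both sources of randomness described above can be compared with a small sketch (not part of the diff; the TraceID bytes are illustrative).

package main

import (
    "fmt"

    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
    "go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
    // Randomness taken from the least-significant 7 bytes of a TraceID.
    tid := pcommon.TraceID{
        0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6,
        0x10, 0x3e, 0x33, 0x09, 0x2e, 0xb6, 0x4b, 0x01,
    }
    fmt.Println(sampling.TraceIDToRandomness(tid).RValue()) // 3e33092eb64b01

    // The same value carried explicitly as an R-value.
    rnd, err := sampling.RValueToRandomness("3e33092eb64b01")
    if err != nil {
        panic(err)
    }
    fmt.Println(rnd.Unsigned() == sampling.TraceIDToRandomness(tid).Unsigned()) // true
}
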
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/threshold.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/threshold.go
new file mode 100644
index 0000000000..044fef736d
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/threshold.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
+
+import (
+ "errors"
+ "strconv"
+ "strings"
+)
+
+const (
+ // MaxAdjustedCount is 2^56 i.e. 0x100000000000000 i.e., 1<<56.
+ MaxAdjustedCount uint64 = 1 << 56
+
+ // NumHexDigits is the number of hex digits equalling 56 bits.
+ // This is the limit of sampling precision.
+ NumHexDigits = 56 / hexBits
+
+ hexBits = 4
+ hexBase = 16
+)
+
+// Threshold represents an exact sampling probability using 56 bits of
+// precision. A Threshold expresses the number of spans, out of 2**56,
+// that are rejected.
+//
+// These 56 bits are compared against 56 bits of randomness, either
+// extracted from an R-value or a TraceID having the W3C-specified
+// randomness bit set.
+//
+// Because Thresholds store 56 bits of information and floating point
+// values store 52 bits of significand, some conversions between
+// Threshold and probability values are lossy. The kinds of loss that
+// occur depend on where in the probability scale it happens, as the
+// step between adjacent floating point values adjusts with the exponent.
+type Threshold struct {
+ // unsigned is in the range [0, MaxAdjustedCount]
+ // - 0 represents always sampling (0 Random values are less-than)
+ // - 1 represents sampling all but 1-in-MaxAdjustedCount
+ // - MaxAdjustedCount represents never sampling
+ unsigned uint64
+}
+
+var (
+ // ErrTValueSize is returned for t-values longer than NumHexDigits hex digits.
+ ErrTValueSize = errors.New("t-value exceeds 14 hex digits")
+
+ // ErrTValueEmpty indicates no t-value was found, i.e., no threshold available.
+ ErrTValueEmpty = errors.New("t-value is empty")
+
+ // AlwaysSampleThreshold represents 100% sampling.
+ AlwaysSampleThreshold = Threshold{unsigned: 0}
+
+ // NeverSampleThreshold is a threshold value that never samples.
+ // The TValue() corresponding with this threshold is an empty string.
+ NeverSampleThreshold = Threshold{unsigned: MaxAdjustedCount}
+)
+
+// TValueToThreshold returns a Threshold. Because TValue strings
+// have trailing zeros omitted, this function performs the reverse.
+func TValueToThreshold(s string) (Threshold, error) {
+ if len(s) > NumHexDigits {
+ return AlwaysSampleThreshold, ErrTValueSize
+ }
+ if len(s) == 0 {
+ return AlwaysSampleThreshold, ErrTValueEmpty
+ }
+
+ // Having checked length above, there are no range errors
+ // possible. Parse the hex string to an unsigned value.
+ unsigned, err := strconv.ParseUint(s, hexBase, 64)
+ if err != nil {
+ return AlwaysSampleThreshold, err // e.g. parse error
+ }
+
+ // The unsigned value requires shifting to account for the
+ // trailing zeros that were omitted by the encoding (see
+ // TValue for the reverse). Compute the number to shift by:
+ extendByHexZeros := NumHexDigits - len(s)
+ return Threshold{
+ unsigned: unsigned << (hexBits * extendByHexZeros),
+ }, nil
+}
+
+// UnsignedToThreshold constructs a threshold expressed in terms
+// defined by number of rejections out of MaxAdjustedCount, which
+// equals the number of randomness values.
+func UnsignedToThreshold(unsigned uint64) (Threshold, error) {
+ if unsigned >= MaxAdjustedCount {
+ return NeverSampleThreshold, ErrTValueSize
+ }
+ return Threshold{unsigned: unsigned}, nil
+}
+
+// TValue encodes a threshold as a variable-length hex string of up
+// to 14 characters. "0" is returned for 100% sampling; the empty
+// string is returned for NeverSampleThreshold (0% sampling).
+func (th Threshold) TValue() string {
+ // Always-sample is a special case because TrimRight() below
+ // will trim it to the empty string, which represents no t-value.
+ switch th {
+ case AlwaysSampleThreshold:
+ return "0"
+ case NeverSampleThreshold:
+ return ""
+ }
+ // For thresholds other than the extremes, format a full-width
+ // (14 digit) unsigned value with leading zeros, then, remove
+ // the trailing zeros. Use the logic for (Randomness).RValue().
+ digits := Randomness(th).RValue()
+
+ // Remove trailing zeros.
+ return strings.TrimRight(digits, "0")
+}
+
+// ShouldSample returns true when the span passes this sampler's
+// consistent sampling decision. The sampling decision can be
+// expressed as a T <= R.
+func (th Threshold) ShouldSample(rnd Randomness) bool {
+ return th.unsigned <= rnd.unsigned
+}
+
+// Unsigned expresses the number of Randomness values (out of
+// MaxAdjustedCount) that are rejected or not sampled. 0 means 100%
+// sampling.
+func (th Threshold) Unsigned() uint64 {
+ return th.unsigned
+}
+
+// AdjustedCount returns the adjusted count for this item, which is
+// the representativity of the item due to sampling, equal to the
+// inverse of sampling probability. If the threshold equals
+// NeverSampleThreshold, the item should not have been sampled, in
+// which case the Adjusted count is zero.
+//
+// This term is defined here:
+// https://opentelemetry.io/docs/specs/otel/trace/tracestate-probability-sampling/
+func (th Threshold) AdjustedCount() float64 {
+ if th == NeverSampleThreshold {
+ return 0
+ }
+ return 1.0 / th.Probability()
+}
+
+// ThresholdGreater allows direct comparison of Threshold values.
+// Greater thresholds equate with smaller sampling probabilities.
+func ThresholdGreater(a, b Threshold) bool {
+ return a.unsigned > b.unsigned
+}
+
+// ThresholdLessThan allows direct comparison of Threshold values.
+// Smaller thresholds equate with greater sampling probabilities.
+func ThresholdLessThan(a, b Threshold) bool {
+ return a.unsigned < b.unsigned
+}
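
A brief sketch (not part of the diff) of the T-value round trip and the T <= R comparison defined above, using illustrative values.

package main

import (
    "fmt"

    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)

func main() {
    // "8" expands to 0x80000000000000: half of all randomness values are
    // rejected, i.e. 50% sampling.
    th, err := sampling.TValueToThreshold("8")
    if err != nil {
        panic(err)
    }
    fmt.Println(th.Probability()) // 0.5

    rnd, _ := sampling.RValueToRandomness("9b8c7d6e5f4a3b")
    fmt.Println(th.ShouldSample(rnd)) // true: 0x80... <= 0x9b...
}
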
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/w3ctracestate.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/w3ctracestate.go
new file mode 100644
index 0000000000..a0ebac614d
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling/w3ctracestate.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
+
+import (
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// W3CTraceState represents a parsed W3C `tracestate` header.
+//
+// This type receives and passes through `tracestate` fields defined
+// by all vendors, while it parses and validates the
+// [OpenTelemetryTraceState] field. After parsing the W3CTraceState,
+// access the OpenTelemetry-defined fields using
+// [W3CTraceState.OTelValue].
+type W3CTraceState struct {
+ // commonTraceState holds "extra" values (e.g.,
+ // vendor-specific tracestate fields) which are propagated but
+ // not used by Sampling logic.
+ commonTraceState
+
+ // otts stores OpenTelemetry-specified tracestate fields.
+ otts OpenTelemetryTraceState
+}
+
+const (
+ hardMaxNumPairs = 32
+ hardMaxW3CLength = 1024
+ hardMaxKeyLength = 256
+ hardMaxTenantLength = 241
+ hardMaxSystemLength = 14
+
+ otelVendorCode = "ot"
+
+ // keyRegexp is not an exact test; it permits all the
+ // characters, and further conditions are checked in code.
+
+ // key = simple-key / multi-tenant-key
+ // simple-key = lcalpha 0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+ // multi-tenant-key = tenant-id "@" system-id
+ // tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+ // system-id = lcalpha 0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+ // lcalpha = %x61-7A ; a-z
+
+ lcAlphaRegexp = `[a-z]`
+ lcAlphanumPunctRegexp = `[a-z0-9\-\*/_]`
+ lcAlphanumRegexp = `[a-z0-9]`
+ multiTenantSep = `@`
+ tenantIDRegexp = lcAlphanumRegexp + lcAlphanumPunctRegexp + `*`
+ systemIDRegexp = lcAlphaRegexp + lcAlphanumPunctRegexp + `*`
+ multiTenantKeyRegexp = tenantIDRegexp + multiTenantSep + systemIDRegexp
+ simpleKeyRegexp = lcAlphaRegexp + lcAlphanumPunctRegexp + `*`
+ keyRegexp = `(?:(?:` + simpleKeyRegexp + `)|(?:` + multiTenantKeyRegexp + `))`
+
+ // value = 0*255(chr) nblk-chr
+ // nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+ // chr = %x20 / nblk-chr
+ //
+ // Note the use of double-quoted strings in two places below.
+ // This is for \x expansion in these two cases. Also note
+ // \x2d is a hyphen character, so a quoted \ (i.e., \\\x2d)
+ // appears below.
+ valueNonblankCharRegexp = "[\x21-\x2b\\\x2d-\x3c\x3e-\x7e]"
+ valueCharRegexp = "[\x20-\x2b\\\x2d-\x3c\x3e-\x7e]"
+ valueRegexp = valueCharRegexp + `{0,255}` + valueNonblankCharRegexp
+
+ // tracestate = list-member 0*31( OWS "," OWS list-member )
+ // list-member = (key "=" value) / OWS
+
+ owsCharSet = ` \t`
+ owsRegexp = `(?:[` + owsCharSet + `]*)`
+ w3cMemberRegexp = `(?:` + keyRegexp + `=` + valueRegexp + `)?`
+
+ w3cOwsMemberOwsRegexp = `(?:` + owsRegexp + w3cMemberRegexp + owsRegexp + `)`
+ w3cCommaOwsMemberOwsRegexp = `(?:` + `,` + w3cOwsMemberOwsRegexp + `)`
+
+ w3cTracestateRegexp = `^` + w3cOwsMemberOwsRegexp + w3cCommaOwsMemberOwsRegexp + `*$`
+
+ // Note that fixed limits on tracestate size are captured above
+ // as '*' regular expressions, which allows the parser to exceed
+ // fixed limits, which are checked in code. This keeps the size
+ // of the compiled regexp reasonable. Some of the regexps above
+ // are too complex to expand e.g., 31 times. In the case of
+ // w3cTracestateRegexp, 32 elements are allowed, which means we
+ // want the w3cCommaOwsMemberOwsRegexp element to match at most
+ // 31 times, but this is checked in code.
+)
+
+var (
+ w3cTracestateRe = regexp.MustCompile(w3cTracestateRegexp)
+
+ w3cSyntax = keyValueScanner{
+ maxItems: hardMaxNumPairs,
+ trim: true,
+ separator: ',',
+ equality: '=',
+ }
+)
+
+// NewW3CTraceState parses a W3C trace state, with special attention
+// to the embedded OpenTelemetry trace state field.
+func NewW3CTraceState(input string) (w3c W3CTraceState, _ error) {
+ if len(input) > hardMaxW3CLength {
+ return w3c, ErrTraceStateSize
+ }
+
+ if !w3cTracestateRe.MatchString(input) {
+ return w3c, strconv.ErrSyntax
+ }
+
+ err := w3cSyntax.scanKeyValues(input, func(key, value string) error {
+ if len(key) > hardMaxKeyLength {
+ return ErrTraceStateSize
+ }
+ if tenant, system, found := strings.Cut(key, multiTenantSep); found {
+ if len(tenant) > hardMaxTenantLength {
+ return ErrTraceStateSize
+ }
+ if len(system) > hardMaxSystemLength {
+ return ErrTraceStateSize
+ }
+ }
+ switch key {
+ case otelVendorCode:
+ var err error
+ w3c.otts, err = NewOpenTelemetryTraceState(value)
+ return err
+ default:
+ w3c.kvs = append(w3c.kvs, KV{
+ Key: key,
+ Value: value,
+ })
+ return nil
+ }
+ })
+ return w3c, err
+}
+
+// HasAnyValue indicates whether there are any values in this
+// tracestate, including extra values.
+func (w3c *W3CTraceState) HasAnyValue() bool {
+ return w3c.OTelValue().HasAnyValue() || len(w3c.ExtraValues()) != 0
+}
+
+// OTelValue returns the OpenTelemetry tracestate value.
+func (w3c *W3CTraceState) OTelValue() *OpenTelemetryTraceState {
+ return &w3c.otts
+}
+
+// Serialize encodes this tracestate object for use as a W3C
+// tracestate header value.
+func (w3c *W3CTraceState) Serialize(w io.StringWriter) error {
+ ser := serializer{writer: w}
+ cnt := 0
+ sep := func() {
+ if cnt != 0 {
+ ser.write(",")
+ }
+ cnt++
+ }
+ if w3c.otts.HasAnyValue() {
+ sep()
+ ser.write("ot=")
+ ser.check(w3c.otts.Serialize(w))
+ }
+ for _, kv := range w3c.ExtraValues() {
+ sep()
+ ser.write(kv.Key)
+ ser.write("=")
+ ser.write(kv.Value)
+ }
+ return ser.err
+}
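
A round-trip sketch (not part of the diff) showing the OpenTelemetry section being updated in place while a vendor-specific entry passes through unchanged; the header value is illustrative.

package main

import (
    "fmt"
    "strings"

    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)

func main() {
    w3c, err := sampling.NewW3CTraceState("ot=th:8,vendor=opaque")
    if err != nil {
        panic(err)
    }

    // Lower the sampling probability carried in the "ot" entry to 12.5%.
    newTh, _ := sampling.ProbabilityToThreshold(0.125)
    if err := w3c.OTelValue().UpdateTValueWithSampling(newTh); err != nil {
        panic(err)
    }

    var buf strings.Builder
    _ = w3c.Serialize(&buf)
    fmt.Println(buf.String()) // ot=th:e,vendor=opaque
}
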
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/converter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/converter.go
index d5d95215a6..de7c259d64 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/converter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/converter.go
@@ -15,6 +15,7 @@ import (
"sync"
"github.com/cespare/xxhash/v2"
+ "go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/zap"
@@ -52,6 +53,8 @@ import (
// │ downstream consumers via OutChannel() │
// └─────────────────────────────────────────────────────┘
type Converter struct {
+ set component.TelemetrySettings
+
// pLogsChan is a channel on which aggregated logs will be sent to.
pLogsChan chan plog.Logs
@@ -70,8 +73,6 @@ type Converter struct {
// wg is a WaitGroup that makes sure that we wait for spun up goroutines exit
// when Stop() is called.
wg sync.WaitGroup
-
- logger *zap.Logger
}
type converterOption interface {
@@ -90,14 +91,15 @@ func (o workerCountOption) apply(c *Converter) {
c.workerCount = o.workerCount
}
-func NewConverter(logger *zap.Logger, opts ...converterOption) *Converter {
+func NewConverter(set component.TelemetrySettings, opts ...converterOption) *Converter {
+ set.Logger = set.Logger.With(zap.String("component", "converter"))
c := &Converter{
+ set: set,
workerChan: make(chan []*entry.Entry),
workerCount: int(math.Max(1, float64(runtime.NumCPU()/4))),
pLogsChan: make(chan plog.Logs),
stopChan: make(chan struct{}),
flushChan: make(chan plog.Logs),
- logger: logger,
}
for _, opt := range opts {
opt.apply(c)
@@ -106,7 +108,7 @@ func NewConverter(logger *zap.Logger, opts ...converterOption) *Converter {
}
func (c *Converter) Start() {
- c.logger.Debug("Starting log converter", zap.Int("worker_count", c.workerCount))
+ c.set.Logger.Debug("Starting log converter", zap.Int("worker_count", c.workerCount))
c.wg.Add(c.workerCount)
for i := 0; i < c.workerCount; i++ {
@@ -202,7 +204,7 @@ func (c *Converter) flushLoop() {
case pLogs := <-c.flushChan:
if err := c.flush(ctx, pLogs); err != nil {
- c.logger.Debug("Problem sending log entries",
+ c.set.Logger.Debug("Problem sending log entries",
zap.Error(err),
)
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/emitter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/emitter.go
index aa1fe90fa7..acd78ce020 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/emitter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/emitter.go
@@ -4,174 +4,16 @@
package adapter // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter"
import (
- "context"
- "sync"
- "time"
-
+ "go.opentelemetry.io/collector/component"
"go.uber.org/zap"
- "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
- "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
)
-// LogEmitter is a stanza operator that emits log entries to a channel
-type LogEmitter struct {
- helper.OutputOperator
- logChan chan []*entry.Entry
- stopOnce sync.Once
- cancel context.CancelFunc
- batchMux sync.Mutex
- batch []*entry.Entry
- wg sync.WaitGroup
- maxBatchSize uint
- flushInterval time.Duration
-}
-
-var (
- defaultFlushInterval = 100 * time.Millisecond
- defaultMaxBatchSize uint = 100
-)
-
-type emitterOption interface {
- apply(*LogEmitter)
-}
-
-func withMaxBatchSize(maxBatchSize uint) emitterOption {
- return maxBatchSizeOption{maxBatchSize}
-}
-
-type maxBatchSizeOption struct {
- maxBatchSize uint
-}
-
-func (o maxBatchSizeOption) apply(e *LogEmitter) {
- e.maxBatchSize = o.maxBatchSize
-}
-
-func withFlushInterval(flushInterval time.Duration) emitterOption {
- return flushIntervalOption{flushInterval}
-}
-
-type flushIntervalOption struct {
- flushInterval time.Duration
-}
-
-func (o flushIntervalOption) apply(e *LogEmitter) {
- e.flushInterval = o.flushInterval
-}
-
-// NewLogEmitter creates a new receiver output
-func NewLogEmitter(logger *zap.SugaredLogger, opts ...emitterOption) *LogEmitter {
- e := &LogEmitter{
- OutputOperator: helper.OutputOperator{
- BasicOperator: helper.BasicOperator{
- OperatorID: "log_emitter",
- OperatorType: "log_emitter",
- SugaredLogger: logger,
- },
- },
- logChan: make(chan []*entry.Entry),
- maxBatchSize: defaultMaxBatchSize,
- batch: make([]*entry.Entry, 0, defaultMaxBatchSize),
- flushInterval: defaultFlushInterval,
- cancel: func() {},
- }
- for _, opt := range opts {
- opt.apply(e)
- }
- return e
-}
-
-// Start starts the goroutine(s) required for this operator
-func (e *LogEmitter) Start(_ operator.Persister) error {
- ctx, cancel := context.WithCancel(context.Background())
- e.cancel = cancel
-
- e.wg.Add(1)
- go e.flusher(ctx)
- return nil
-}
-
-// Stop will close the log channel and stop running goroutines
-func (e *LogEmitter) Stop() error {
- e.stopOnce.Do(func() {
- e.cancel()
- e.wg.Wait()
-
- close(e.logChan)
- })
-
- return nil
-}
-
-// OutChannel returns the channel on which entries will be sent to.
-func (e *LogEmitter) OutChannel() <-chan []*entry.Entry {
- return e.logChan
-}
-
-// Process will emit an entry to the output channel
-func (e *LogEmitter) Process(ctx context.Context, ent *entry.Entry) error {
- if oldBatch := e.appendEntry(ent); len(oldBatch) > 0 {
- e.flush(ctx, oldBatch)
- }
-
- return nil
-}
-
-// appendEntry appends the entry to the current batch. If maxBatchSize is reached, a new batch will be made, and the old batch
-// (which should be flushed) will be returned
-func (e *LogEmitter) appendEntry(ent *entry.Entry) []*entry.Entry {
- e.batchMux.Lock()
- defer e.batchMux.Unlock()
-
- e.batch = append(e.batch, ent)
- if uint(len(e.batch)) >= e.maxBatchSize {
- var oldBatch []*entry.Entry
- oldBatch, e.batch = e.batch, make([]*entry.Entry, 0, e.maxBatchSize)
- return oldBatch
- }
-
- return nil
-}
-
-// flusher flushes the current batch every flush interval. Intended to be run as a goroutine
-func (e *LogEmitter) flusher(ctx context.Context) {
- defer e.wg.Done()
-
- ticker := time.NewTicker(e.flushInterval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- if oldBatch := e.makeNewBatch(); len(oldBatch) > 0 {
- e.flush(ctx, oldBatch)
- }
- case <-ctx.Done():
- return
- }
- }
-}
-
-// flush flushes the provided batch to the log channel.
-func (e *LogEmitter) flush(ctx context.Context, batch []*entry.Entry) {
- select {
- case e.logChan <- batch:
- case <-ctx.Done():
- }
-}
-
-// makeNewBatch replaces the current batch on the log emitter with a new batch, returning the old one
-func (e *LogEmitter) makeNewBatch() []*entry.Entry {
- e.batchMux.Lock()
- defer e.batchMux.Unlock()
-
- if len(e.batch) == 0 {
- return nil
- }
+// Deprecated: [v0.101.0] Use helper.LogEmitter directly instead
+type LogEmitter = helper.LogEmitter
- var oldBatch []*entry.Entry
- oldBatch, e.batch = e.batch, make([]*entry.Entry, 0, e.maxBatchSize)
- return oldBatch
+// Deprecated: [v0.101.0] Use helper.NewLogEmitter directly instead
+func NewLogEmitter(logger *zap.SugaredLogger, opts ...helper.EmitterOption) *LogEmitter {
+ return helper.NewLogEmitter(component.TelemetrySettings{Logger: logger.Desugar()}, opts...)
}
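
Call sites that previously built the adapter's LogEmitter from a SugaredLogger can construct the helper type directly. A hedged migration sketch, assuming only the options referenced elsewhere in this diff (WithMaxBatchSize, WithFlushInterval) are needed, and that a TelemetrySettings carrying just a Logger is sufficient, as the deprecated wrapper above already relies on:

package main

import (
    "time"

    "go.opentelemetry.io/collector/component"
    "go.uber.org/zap"

    "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
)

func newEmitter(logger *zap.Logger) *helper.LogEmitter {
    set := component.TelemetrySettings{Logger: logger}
    // 100 entries / 100ms mirror the defaults from the removed code above.
    return helper.NewLogEmitter(
        set,
        helper.WithMaxBatchSize(100),
        helper.WithFlushInterval(100*time.Millisecond),
    )
}

func main() {
    _ = newEmitter(zap.NewNop())
}
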
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/factory.go
index 5cdd7c9bfb..9c46aac9c6 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/factory.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/factory.go
@@ -13,6 +13,7 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/consumerretry"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/pipeline"
)
@@ -45,18 +46,18 @@ func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc {
operators := append([]operator.Config{inputCfg}, baseCfg.Operators...)
- emitterOpts := []emitterOption{}
+ emitterOpts := []helper.EmitterOption{}
if baseCfg.maxBatchSize > 0 {
- emitterOpts = append(emitterOpts, withMaxBatchSize(baseCfg.maxBatchSize))
+ emitterOpts = append(emitterOpts, helper.WithMaxBatchSize(baseCfg.maxBatchSize))
}
if baseCfg.flushInterval > 0 {
- emitterOpts = append(emitterOpts, withFlushInterval(baseCfg.flushInterval))
+ emitterOpts = append(emitterOpts, helper.WithFlushInterval(baseCfg.flushInterval))
}
- emitter := NewLogEmitter(params.Logger.Sugar(), emitterOpts...)
+ emitter := helper.NewLogEmitter(params.TelemetrySettings, emitterOpts...)
pipe, err := pipeline.Config{
Operators: operators,
DefaultOutput: emitter,
- }.Build(params.Logger.Sugar())
+ }.Build(params.TelemetrySettings)
if err != nil {
return nil, err
}
@@ -65,7 +66,7 @@ func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc {
if baseCfg.numWorkers > 0 {
converterOpts = append(converterOpts, withWorkerCount(baseCfg.numWorkers))
}
- converter := NewConverter(params.Logger, converterOpts...)
+ converter := NewConverter(params.TelemetrySettings, converterOpts...)
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: params.ID,
ReceiverCreateSettings: params,
@@ -74,11 +75,11 @@ func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc {
return nil, err
}
return &receiver{
+ set: params.TelemetrySettings,
id: params.ID,
pipe: pipe,
emitter: emitter,
consumer: consumerretry.NewLogs(baseCfg.RetryOnFailure, params.Logger, nextConsumer),
- logger: params.Logger,
converter: converter,
obsrecv: obsrecv,
storageID: baseCfg.StorageID,
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/frompdataconverter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/frompdataconverter.go
index 11a18e6d63..f294cdb3a0 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/frompdataconverter.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/frompdataconverter.go
@@ -10,6 +10,7 @@ import (
"sync"
"time"
+ "go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/zap"
@@ -38,6 +39,8 @@ import (
// └─┤ and sends them along entriesChan │
// └───────────────────────────────────────────────────┘
type FromPdataConverter struct {
+ set component.TelemetrySettings
+
// entriesChan is a channel on which converted logs will be sent out of the converter.
entriesChan chan []*entry.Entry
@@ -51,28 +54,26 @@ type FromPdataConverter struct {
// wg is a WaitGroup that makes sure that we wait for spun up goroutines exit
// when Stop() is called.
wg sync.WaitGroup
-
- logger *zap.Logger
}
-func NewFromPdataConverter(workerCount int, logger *zap.Logger) *FromPdataConverter {
- if logger == nil {
- logger = zap.NewNop()
+func NewFromPdataConverter(set component.TelemetrySettings, workerCount int) *FromPdataConverter {
+ if set.Logger == nil {
+ set.Logger = zap.NewNop()
}
if workerCount <= 0 {
workerCount = int(math.Max(1, float64(runtime.NumCPU())))
}
return &FromPdataConverter{
+ set: set,
workerChan: make(chan fromConverterWorkerItem, workerCount),
entriesChan: make(chan []*entry.Entry),
stopChan: make(chan struct{}),
- logger: logger,
}
}
func (c *FromPdataConverter) Start() {
- c.logger.Debug("Starting log converter from pdata", zap.Int("worker_count", cap(c.workerChan)))
+ c.set.Logger.Debug("Starting log converter from pdata", zap.Int("worker_count", cap(c.workerChan)))
for i := 0; i < cap(c.workerChan); i++ {
c.wg.Add(1)
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/receiver.go
index ffdeeb1b94..4df6fd846b 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/receiver.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/receiver.go
@@ -16,19 +16,20 @@ import (
"go.uber.org/multierr"
"go.uber.org/zap"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/pipeline"
)
type receiver struct {
+ set component.TelemetrySettings
id component.ID
wg sync.WaitGroup
cancel context.CancelFunc
pipe pipeline.Pipeline
- emitter *LogEmitter
+ emitter *helper.LogEmitter
consumer consumer.Logs
converter *Converter
- logger *zap.Logger
obsrecv *receiverhelper.ObsReport
storageID *component.ID
@@ -42,7 +43,7 @@ var _ rcvr.Logs = (*receiver)(nil)
func (r *receiver) Start(ctx context.Context, host component.Host) error {
rctx, cancel := context.WithCancel(ctx)
r.cancel = cancel
- r.logger.Info("Starting stanza receiver")
+ r.set.Logger.Info("Starting stanza receiver")
if err := r.setStorageClient(ctx, host); err != nil {
return fmt.Errorf("storage client: %w", err)
@@ -87,16 +88,16 @@ func (r *receiver) emitterLoop(ctx context.Context) {
for {
select {
case <-doneChan:
- r.logger.Debug("Receive loop stopped")
+ r.set.Logger.Debug("Receive loop stopped")
return
- case e, ok := <-r.emitter.logChan:
+ case e, ok := <-r.emitter.OutChannel():
if !ok {
continue
}
if err := r.converter.Batch(e); err != nil {
- r.logger.Error("Could not add entry to batch", zap.Error(err))
+ r.set.Logger.Error("Could not add entry to batch", zap.Error(err))
}
}
}
@@ -112,19 +113,19 @@ func (r *receiver) consumerLoop(ctx context.Context) {
for {
select {
case <-doneChan:
- r.logger.Debug("Consumer loop stopped")
+ r.set.Logger.Debug("Consumer loop stopped")
return
case pLogs, ok := <-pLogsChan:
if !ok {
- r.logger.Debug("Converter channel got closed")
+ r.set.Logger.Debug("Converter channel got closed")
continue
}
obsrecvCtx := r.obsrecv.StartLogsOp(ctx)
logRecordCount := pLogs.LogRecordCount()
cErr := r.consumer.ConsumeLogs(ctx, pLogs)
if cErr != nil {
- r.logger.Error("ConsumeLogs() failed", zap.Error(cErr))
+ r.set.Logger.Error("ConsumeLogs() failed", zap.Error(cErr))
}
r.obsrecv.EndLogsOp(obsrecvCtx, "stanza", logRecordCount, cErr)
}
@@ -137,7 +138,7 @@ func (r *receiver) Shutdown(ctx context.Context) error {
return nil
}
- r.logger.Info("Stopping stanza receiver")
+ r.set.Logger.Info("Stopping stanza receiver")
pipelineErr := r.pipe.Stop()
r.converter.Stop()
r.cancel()
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/register.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/register.go
index 8105ef17d5..426e456dec 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/register.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter/register.go
@@ -6,6 +6,7 @@ package adapter // import "github.com/open-telemetry/opentelemetry-collector-con
import (
_ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file" // Register parsers and transformers for stanza-based log receivers
_ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout"
+ _ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container"
_ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/csv"
_ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/json"
_ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/jsonarray"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/config.go
index 02de7d59e9..5b6a87342e 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/config.go
@@ -10,7 +10,9 @@ import (
"runtime"
"time"
+ "go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/featuregate"
+ "go.opentelemetry.io/otel/metric"
"go.uber.org/zap"
"golang.org/x/text/encoding"
@@ -33,6 +35,8 @@ const (
defaultMaxConcurrentFiles = 1024
defaultEncoding = "utf-8"
defaultPollInterval = 200 * time.Millisecond
+ openFilesMetric = "fileconsumer/open_files"
+ readingFilesMetric = "fileconsumer/reading_files"
)
var allowFileDeletion = featuregate.GlobalRegistry().MustRegister(
@@ -89,11 +93,11 @@ type HeaderConfig struct {
}
// Deprecated [v0.97.0] Use Build and WithSplitFunc option instead
-func (c Config) BuildWithSplitFunc(logger *zap.SugaredLogger, emit emit.Callback, splitFunc bufio.SplitFunc) (*Manager, error) {
- return c.Build(logger, emit, WithSplitFunc(splitFunc))
+func (c Config) BuildWithSplitFunc(set component.TelemetrySettings, emit emit.Callback, splitFunc bufio.SplitFunc) (*Manager, error) {
+ return c.Build(set, emit, WithSplitFunc(splitFunc))
}
-func (c Config) Build(logger *zap.SugaredLogger, emit emit.Callback, opts ...Option) (*Manager, error) {
+func (c Config) Build(set component.TelemetrySettings, emit emit.Callback, opts ...Option) (*Manager, error) {
if err := c.validate(); err != nil {
return nil, err
}
@@ -136,7 +140,7 @@ func (c Config) Build(logger *zap.SugaredLogger, emit emit.Callback, opts ...Opt
var hCfg *header.Config
if c.Header != nil {
- hCfg, err = header.NewConfig(c.Header.Pattern, c.Header.MetadataOperators, enc)
+ hCfg, err = header.NewConfig(set, c.Header.Pattern, c.Header.MetadataOperators, enc)
if err != nil {
return nil, fmt.Errorf("failed to build header config: %w", err)
}
@@ -147,8 +151,9 @@ func (c Config) Build(logger *zap.SugaredLogger, emit emit.Callback, opts ...Opt
return nil, err
}
+ set.Logger = set.Logger.With(zap.String("component", "fileconsumer"))
readerFactory := reader.Factory{
- SugaredLogger: logger.With("component", "fileconsumer"),
+ TelemetrySettings: set,
FromBeginning: startAtBeginning,
FingerprintSize: int(c.FingerprintSize),
InitialBufferSize: scanner.DefaultBufferSize,
@@ -163,14 +168,41 @@ func (c Config) Build(logger *zap.SugaredLogger, emit emit.Callback, opts ...Opt
DeleteAtEOF: c.DeleteAfterRead,
}
+ var t tracker.Tracker
+ if o.noTracking {
+ t = tracker.NewNoStateTracker(set, c.MaxConcurrentFiles/2)
+ } else {
+ t = tracker.NewFileTracker(set, c.MaxConcurrentFiles/2)
+ }
+
+ meter := set.MeterProvider.Meter("otelcol/fileconsumer")
+
+ openFiles, err := meter.Int64UpDownCounter(
+ openFilesMetric,
+ metric.WithDescription("Number of open files"),
+ metric.WithUnit("1"),
+ )
+ if err != nil {
+ return nil, err
+ }
+ readingFiles, err := meter.Int64UpDownCounter(
+ readingFilesMetric,
+ metric.WithDescription("Number of open files that are being read"),
+ metric.WithUnit("1"),
+ )
+ if err != nil {
+ return nil, err
+ }
return &Manager{
- SugaredLogger: logger.With("component", "fileconsumer"),
+ set: set,
readerFactory: readerFactory,
fileMatcher: fileMatcher,
pollInterval: c.PollInterval,
maxBatchFiles: c.MaxConcurrentFiles / 2,
maxBatches: c.MaxBatches,
- tracker: tracker.New(logger.With("component", "fileconsumer"), c.MaxConcurrentFiles/2),
+ tracker: t,
+ openFiles: openFiles,
+ readingFiles: readingFiles,
}, nil
}
@@ -216,7 +248,8 @@ func (c Config) validate() error {
if c.StartAt == "end" {
return fmt.Errorf("'header' cannot be specified with 'start_at: end'")
}
- if _, errConfig := header.NewConfig(c.Header.Pattern, c.Header.MetadataOperators, enc); errConfig != nil {
+ set := component.TelemetrySettings{Logger: zap.NewNop()}
+ if _, errConfig := header.NewConfig(set, c.Header.Pattern, c.Header.MetadataOperators, enc); errConfig != nil {
return fmt.Errorf("invalid config for 'header': %w", errConfig)
}
}
@@ -229,7 +262,8 @@ func (c Config) validate() error {
}
type options struct {
- splitFunc bufio.SplitFunc
+ splitFunc bufio.SplitFunc
+ noTracking bool
}
type Option func(*options)
@@ -240,3 +274,11 @@ func WithSplitFunc(f bufio.SplitFunc) Option {
o.splitFunc = f
}
}
+
+// WithNoTracking forces the readerFactory to not keep track of files in memory. When used, the reader will
+// read from the beginning of each file every time it is polled.
+func WithNoTracking() Option {
+ return func(o *options) {
+ o.noTracking = true
+ }
+}
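For reference, a minimal sketch of the updated fileconsumer API introduced by this hunk: `Config.Build` now takes `component.TelemetrySettings` (whose `MeterProvider` registers the `fileconsumer/open_files` and `fileconsumer/reading_files` up-down counters), and the new `WithNoTracking()` option selects the stateless tracker. This is not part of the patch; the glob, the helper name, and the externally supplied `emit.Callback` are illustrative assumptions.

```go
package example

import (
	"go.opentelemetry.io/collector/component"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/emit"
)

// buildStatelessManager is a hypothetical helper: it builds a Manager that
// re-reads matched files from the beginning on every poll cycle because
// WithNoTracking() swaps in the noStateTracker instead of the fileTracker.
// set must carry a non-nil Logger and MeterProvider (for example from
// componenttest.NewNopTelemetrySettings()), since Build now creates metrics.
func buildStatelessManager(set component.TelemetrySettings, emitFn emit.Callback) (*fileconsumer.Manager, error) {
	cfg := fileconsumer.NewConfig()
	cfg.Include = []string{"/var/log/app/*.log"} // illustrative glob
	return cfg.Build(set, emitFn, fileconsumer.WithNoTracking())
}
```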
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/design.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/design.md
index 34d7fee700..23100d22cd 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/design.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/design.md
@@ -206,6 +206,40 @@ When the operator shuts down, the following occurs:
The net effect of the shut down routine is that all files are checkpointed in a normal manner
(i.e. not in the middle of a log entry), and all checkpoints are persisted.
+### Log rotation
+
+#### Supported cases
+
+A) When a file is moved within the pattern with unread logs on the end, then the original is created again,
+ we get the unread logs from the moved file as well as any new logs written to the newly created file.
+
+B) When a file is copied within the pattern with unread logs on the end, then the original is truncated,
+ we get the unread logs from the copy as well as any new logs written to the truncated file.
+
+C) When a file is rotated out of the pattern via move/create, we detect that
+ our old handle is still valid and we attempt to read from it.
+
+D) When a file is rotated out of the pattern via copy/truncate, we detect that
+ our old handle is invalid and we do not attempt to read from it.
+
+
+#### Rotated files that end up within the matching pattern
+
+In both cases of copy/truncate and move/create, if the rotated files match the pattern
+then the old readers that point to the original path will be closed and we will create new
+ones that point to the rotated file but resume from the existing metadata's offset.
+The receiver will continue consuming the rotated paths in any case so there will be
+no data loss during the transition.
+The original files will have a fresh fingerprint so they will be consumed by a completely
+new reader.
+
+#### Rotated files that end up out of the matching pattern
+
+If a file has been rotated with move/create, the old handle still points
+to the moved file, so we can continue consuming from it even though it is out of the pattern.
+If a file has been rotated with copy/truncate, the old handle points to the original
+file, which has been truncated, so we have no handle with which to consume any remaining
+logs from the rotated copy. This can cause data loss.
# Known Limitations
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file.go
index 94b72b132b..517526ebe5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file.go
@@ -10,6 +10,8 @@ import (
"sync"
"time"
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/otel/metric"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/checkpoint"
@@ -21,18 +23,24 @@ import (
)
type Manager struct {
+ // Deprecated [v0.101.0]
*zap.SugaredLogger
+
+ set component.TelemetrySettings
wg sync.WaitGroup
cancel context.CancelFunc
readerFactory reader.Factory
fileMatcher *matcher.Matcher
- tracker *tracker.Tracker
+ tracker tracker.Tracker
pollInterval time.Duration
persister operator.Persister
maxBatches int
maxBatchFiles int
+
+ openFiles metric.Int64UpDownCounter
+ readingFiles metric.Int64UpDownCounter
}
func (m *Manager) Start(persister operator.Persister) error {
@@ -40,7 +48,7 @@ func (m *Manager) Start(persister operator.Persister) error {
m.cancel = cancel
if _, err := m.fileMatcher.MatchFiles(); err != nil {
- m.Warnf("finding files: %v", err)
+ m.set.Logger.Warn("finding files", zap.Error(err))
}
if persister != nil {
@@ -50,7 +58,7 @@ func (m *Manager) Start(persister operator.Persister) error {
return fmt.Errorf("read known files from database: %w", err)
}
if len(offsets) > 0 {
- m.Infow("Resuming from previously known offset(s). 'start_at' setting is not applicable.")
+ m.set.Logger.Info("Resuming from previously known offset(s). 'start_at' setting is not applicable.")
m.readerFactory.FromBeginning = true
m.tracker.LoadMetadata(offsets)
}
@@ -69,10 +77,10 @@ func (m *Manager) Stop() error {
m.cancel = nil
}
m.wg.Wait()
- m.tracker.ClosePreviousFiles()
+ m.openFiles.Add(context.TODO(), int64(0-m.tracker.ClosePreviousFiles()))
if m.persister != nil {
if err := checkpoint.Save(context.Background(), m.persister, m.tracker.GetMetadata()); err != nil {
- m.Errorw("save offsets", zap.Error(err))
+ m.set.Logger.Error("save offsets", zap.Error(err))
}
}
return nil
@@ -107,9 +115,9 @@ func (m *Manager) poll(ctx context.Context) {
// Get the list of paths on disk
matches, err := m.fileMatcher.MatchFiles()
if err != nil {
- m.Debugf("finding files: %v", err)
+ m.set.Logger.Debug("finding files", zap.Error(err))
}
- m.Debugw("matched files", zap.Strings("paths", matches))
+ m.set.Logger.Debug("matched files", zap.Strings("paths", matches))
for len(matches) > m.maxBatchFiles {
m.consume(ctx, matches[:m.maxBatchFiles])
@@ -129,8 +137,11 @@ func (m *Manager) poll(ctx context.Context) {
// Any new files that appear should be consumed entirely
m.readerFactory.FromBeginning = true
if m.persister != nil {
- if err := checkpoint.Save(context.Background(), m.persister, m.tracker.GetMetadata()); err != nil {
- m.Errorw("save offsets", zap.Error(err))
+ metadata := m.tracker.GetMetadata()
+ if metadata != nil {
+ if err := checkpoint.Save(context.Background(), m.persister, metadata); err != nil {
+ m.set.Logger.Error("save offsets", zap.Error(err))
+ }
}
}
// rotate at end of every poll()
@@ -138,8 +149,8 @@ func (m *Manager) poll(ctx context.Context) {
}
func (m *Manager) consume(ctx context.Context, paths []string) {
- m.Debug("Consuming files", zap.Strings("paths", paths))
- m.makeReaders(paths)
+ m.set.Logger.Debug("Consuming files", zap.Strings("paths", paths))
+ m.makeReaders(ctx, paths)
m.readLostFiles(ctx)
@@ -149,25 +160,27 @@ func (m *Manager) consume(ctx context.Context, paths []string) {
wg.Add(1)
go func(r *reader.Reader) {
defer wg.Done()
+ m.readingFiles.Add(ctx, 1)
r.ReadToEnd(ctx)
+ m.readingFiles.Add(ctx, -1)
}(r)
}
wg.Wait()
- m.tracker.EndConsume()
+ m.openFiles.Add(ctx, int64(0-m.tracker.EndConsume()))
}
func (m *Manager) makeFingerprint(path string) (*fingerprint.Fingerprint, *os.File) {
file, err := os.Open(path) // #nosec - operator must read in files defined by user
if err != nil {
- m.Errorw("Failed to open file", zap.Error(err))
+ m.set.Logger.Error("Failed to open file", zap.Error(err))
return nil, nil
}
fp, err := m.readerFactory.NewFingerprint(file)
if err != nil {
if err = file.Close(); err != nil {
- m.Debugw("problem closing file", zap.Error(err))
+ m.set.Logger.Debug("problem closing file", zap.Error(err))
}
return nil, nil
}
@@ -175,7 +188,7 @@ func (m *Manager) makeFingerprint(path string) (*fingerprint.Fingerprint, *os.Fi
if fp.Len() == 0 {
// Empty file, don't read it until we can compare its fingerprint
if err = file.Close(); err != nil {
- m.Debugw("problem closing file", zap.Error(err))
+ m.set.Logger.Debug("problem closing file", zap.Error(err))
}
return nil, nil
}
@@ -185,7 +198,7 @@ func (m *Manager) makeFingerprint(path string) (*fingerprint.Fingerprint, *os.Fi
// makeReader take a file path, then creates reader,
// discarding any that have a duplicate fingerprint to other files that have already
// been read this polling interval
-func (m *Manager) makeReaders(paths []string) {
+func (m *Manager) makeReaders(ctx context.Context, paths []string) {
for _, path := range paths {
fp, file := m.makeFingerprint(path)
if fp == nil {
@@ -195,17 +208,18 @@ func (m *Manager) makeReaders(paths []string) {
// Exclude duplicate paths with the same content. This can happen when files are
// being rotated with copy/truncate strategy. (After copy, prior to truncate.)
if r := m.tracker.GetCurrentFile(fp); r != nil {
+ m.set.Logger.Debug("Skipping duplicate file", zap.String("path", file.Name()))
// re-add the reader as Match() removes duplicates
m.tracker.Add(r)
if err := file.Close(); err != nil {
- m.Debugw("problem closing file", zap.Error(err))
+ m.set.Logger.Debug("problem closing file", zap.Error(err))
}
continue
}
- r, err := m.newReader(file, fp)
+ r, err := m.newReader(ctx, file, fp)
if err != nil {
- m.Errorw("Failed to create reader", zap.Error(err))
+ m.set.Logger.Error("Failed to create reader", zap.Error(err))
continue
}
@@ -213,18 +227,41 @@ func (m *Manager) makeReaders(paths []string) {
}
}
-func (m *Manager) newReader(file *os.File, fp *fingerprint.Fingerprint) (*reader.Reader, error) {
+func (m *Manager) newReader(ctx context.Context, file *os.File, fp *fingerprint.Fingerprint) (*reader.Reader, error) {
// Check previous poll cycle for match
if oldReader := m.tracker.GetOpenFile(fp); oldReader != nil {
+ if oldReader.GetFileName() != file.Name() {
+ if !oldReader.Validate() {
+ m.set.Logger.Debug(
+ "File has been rotated(truncated)",
+ zap.String("original_path", oldReader.GetFileName()),
+ zap.String("rotated_path", file.Name()))
+ } else {
+ m.set.Logger.Debug(
+ "File has been rotated(moved)",
+ zap.String("original_path", oldReader.GetFileName()),
+ zap.String("rotated_path", file.Name()))
+ }
+ }
return m.readerFactory.NewReaderFromMetadata(file, oldReader.Close())
}
- // Cleck for closed files for match
+ // Check for closed files for match
if oldMetadata := m.tracker.GetClosedFile(fp); oldMetadata != nil {
- return m.readerFactory.NewReaderFromMetadata(file, oldMetadata)
+ r, err := m.readerFactory.NewReaderFromMetadata(file, oldMetadata)
+ if err != nil {
+ return nil, err
+ }
+ m.openFiles.Add(ctx, 1)
+ return r, nil
}
// If we don't match any previously known files, create a new reader from scratch
- m.Infow("Started watching file", "path", file.Name())
- return m.readerFactory.NewReader(file, fp)
+ m.set.Logger.Info("Started watching file", zap.String("path", file.Name()))
+ r, err := m.readerFactory.NewReader(file, fp)
+ if err != nil {
+ return nil, err
+ }
+ m.openFiles.Add(ctx, 1)
+ return r, nil
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_other.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_other.go
index 816a0cee50..5acc955e84 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_other.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/file_other.go
@@ -9,6 +9,8 @@ import (
"context"
"sync"
+ "go.uber.org/zap"
+
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader"
)
@@ -34,13 +36,17 @@ OUTER:
continue
}
- // At this point, we know that the file has been rotated. However, we do not know
- // if it was moved or truncated. If truncated, then both handles point to the same
- // file, in which case we should only read from it using the new reader. We can use
+ // At this point, we know that the file has been rotated out of the matching pattern.
+ // However, we do not know if it was moved or truncated.
+ // If truncated, then both handles point to the same file, in which case
+ // we should only read from it using the new reader. We can use
// the Validate method to ensure that the file has not been truncated.
if !oldReader.Validate() {
+ m.set.Logger.Debug("File has been rotated(truncated)", zap.String("path", oldReader.GetFileName()))
continue OUTER
}
+ // oldReader points to the rotated file after the move/rename. We can still read from it.
+ m.set.Logger.Debug("File has been rotated(moved)", zap.String("path", oldReader.GetFileName()))
}
lostReaders = append(lostReaders, oldReader)
}
@@ -48,9 +54,12 @@ OUTER:
var lostWG sync.WaitGroup
for _, lostReader := range lostReaders {
lostWG.Add(1)
+ m.set.Logger.Debug("Reading lost file", zap.String("path", lostReader.GetFileName()))
go func(r *reader.Reader) {
defer lostWG.Done()
+ m.readingFiles.Add(ctx, 1)
r.ReadToEnd(ctx)
+ m.readingFiles.Add(ctx, -1)
}(lostReader)
}
lostWG.Wait()
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/config.go
index 618d1b1fa7..768fc1611d 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/config.go
@@ -10,7 +10,7 @@ import (
"fmt"
"regexp"
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"golang.org/x/text/encoding"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
@@ -25,7 +25,7 @@ type Config struct {
metadataOperators []operator.Config
}
-func NewConfig(matchRegex string, metadataOperators []operator.Config, enc encoding.Encoding) (*Config, error) {
+func NewConfig(set component.TelemetrySettings, matchRegex string, metadataOperators []operator.Config, enc encoding.Encoding) (*Config, error) {
var err error
if len(metadataOperators) == 0 {
return nil, errors.New("at least one operator must be specified for `metadata_operators`")
@@ -35,11 +35,10 @@ func NewConfig(matchRegex string, metadataOperators []operator.Config, enc encod
return nil, errors.New("encoding must be specified")
}
- nopLogger := zap.NewNop().Sugar()
p, err := pipeline.Config{
Operators: metadataOperators,
- DefaultOutput: newPipelineOutput(nopLogger),
- }.Build(nopLogger)
+ DefaultOutput: newPipelineOutput(set),
+ }.Build(set)
if err != nil {
return nil, fmt.Errorf("failed to build pipelines: %w", err)
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/output.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/output.go
index 7caf7f9a89..7e76cc8bb2 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/output.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/output.go
@@ -7,7 +7,7 @@ import (
"context"
"fmt"
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
@@ -22,16 +22,11 @@ type pipelineOutput struct {
}
// newPipelineOutput creates a new receiver output
-func newPipelineOutput(logger *zap.SugaredLogger) *pipelineOutput {
+func newPipelineOutput(set component.TelemetrySettings) *pipelineOutput {
+ op, _ := helper.NewOutputConfig(pipelineOutputType, pipelineOutputType).Build(set)
return &pipelineOutput{
- OutputOperator: helper.OutputOperator{
- BasicOperator: helper.BasicOperator{
- OperatorID: pipelineOutputType,
- OperatorType: pipelineOutputType,
- SugaredLogger: logger,
- },
- },
- logChan: make(chan *entry.Entry, 1),
+ OutputOperator: op,
+ logChan: make(chan *entry.Entry, 1),
}
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/reader.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/reader.go
index f55b2323cf..27a81c338b 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/reader.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/header/reader.go
@@ -8,8 +8,8 @@ import (
"errors"
"fmt"
+ "go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/extension/experimental/storage"
- "go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/pipeline"
@@ -18,20 +18,20 @@ import (
var ErrEndOfHeader = errors.New("end of header")
type Reader struct {
- logger *zap.SugaredLogger
+ set component.TelemetrySettings
cfg Config
pipeline pipeline.Pipeline
output *pipelineOutput
}
-func NewReader(logger *zap.SugaredLogger, cfg Config) (*Reader, error) {
- r := &Reader{logger: logger, cfg: cfg}
+func NewReader(set component.TelemetrySettings, cfg Config) (*Reader, error) {
+ r := &Reader{set: set, cfg: cfg}
var err error
- r.output = newPipelineOutput(logger)
+ r.output = newPipelineOutput(set)
r.pipeline, err = pipeline.Config{
Operators: cfg.metadataOperators,
DefaultOutput: r.output,
- }.Build(logger)
+ }.Build(set)
if err != nil {
return nil, fmt.Errorf("failed to build pipeline: %w", err)
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/factory.go
index 1e1d37b0db..bd3508fa68 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/factory.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/factory.go
@@ -10,6 +10,7 @@ import (
"os"
"time"
+ "go.opentelemetry.io/collector/component"
"go.uber.org/zap"
"golang.org/x/text/encoding"
@@ -28,7 +29,7 @@ const (
)
type Factory struct {
- *zap.SugaredLogger
+ component.TelemetrySettings
HeaderConfig *header.Config
FromBeginning bool
FingerprintSize int
@@ -60,9 +61,10 @@ func (f *Factory) NewReader(file *os.File, fp *fingerprint.Fingerprint) (*Reader
}
func (f *Factory) NewReaderFromMetadata(file *os.File, m *Metadata) (r *Reader, err error) {
+
r = &Reader{
Metadata: m,
- logger: f.SugaredLogger.With("path", file.Name()),
+ set: f.TelemetrySettings,
file: file,
fileName: file.Name(),
fingerprintSize: f.FingerprintSize,
@@ -72,6 +74,7 @@ func (f *Factory) NewReaderFromMetadata(file *os.File, m *Metadata) (r *Reader,
lineSplitFunc: f.SplitFunc,
deleteAtEOF: f.DeleteAtEOF,
}
+ r.set.Logger = r.set.Logger.With(zap.String("path", r.fileName))
if r.Fingerprint.Len() > r.fingerprintSize {
// User has reconfigured fingerprint_size
@@ -100,7 +103,7 @@ func (f *Factory) NewReaderFromMetadata(file *os.File, m *Metadata) (r *Reader,
r.splitFunc = r.lineSplitFunc
r.processFunc = r.emitFunc
} else {
- r.headerReader, err = header.NewReader(f.SugaredLogger, *f.HeaderConfig)
+ r.headerReader, err = header.NewReader(f.TelemetrySettings, *f.HeaderConfig)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/reader.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/reader.go
index ae5f4a5e29..372f42e48d 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/reader.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader/reader.go
@@ -9,6 +9,7 @@ import (
"errors"
"os"
+ "go.opentelemetry.io/collector/component"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/decode"
@@ -30,7 +31,7 @@ type Metadata struct {
// Reader manages a single file
type Reader struct {
*Metadata
- logger *zap.SugaredLogger
+ set component.TelemetrySettings
fileName string
file *os.File
fingerprintSize int
@@ -49,7 +50,7 @@ type Reader struct {
// ReadToEnd will read until the end of the file
func (r *Reader) ReadToEnd(ctx context.Context) {
if _, err := r.file.Seek(r.Offset, 0); err != nil {
- r.logger.Errorw("Failed to seek", zap.Error(err))
+ r.set.Logger.Error("Failed to seek", zap.Error(err))
return
}
@@ -72,7 +73,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
ok := s.Scan()
if !ok {
if err := s.Error(); err != nil {
- r.logger.Errorw("Failed during scan", zap.Error(err))
+ r.set.Logger.Error("Failed during scan", zap.Error(err))
} else if r.deleteAtEOF {
r.delete()
}
@@ -81,7 +82,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
token, err := r.decoder.Decode(s.Bytes())
if err != nil {
- r.logger.Errorw("decode: %w", zap.Error(err))
+ r.set.Logger.Error("decode: %w", zap.Error(err))
r.Offset = s.Pos() // move past the bad token or we may be stuck
continue
}
@@ -93,14 +94,14 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
}
if !errors.Is(err, header.ErrEndOfHeader) {
- r.logger.Errorw("process: %w", zap.Error(err))
+ r.set.Logger.Error("process: %w", zap.Error(err))
r.Offset = s.Pos() // move past the bad token or we may be stuck
continue
}
// Clean up the header machinery
if err = r.headerReader.Stop(); err != nil {
- r.logger.Errorw("Failed to stop header pipeline during finalization", zap.Error(err))
+ r.set.Logger.Error("Failed to stop header pipeline during finalization", zap.Error(err))
}
r.headerReader = nil
r.HeaderFinalized = true
@@ -113,7 +114,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
// Do not use the updated offset from the old scanner, as the most recent token
// could be split differently with the new splitter.
if _, err = r.file.Seek(r.Offset, 0); err != nil {
- r.logger.Errorw("Failed to seek post-header", zap.Error(err))
+ r.set.Logger.Error("Failed to seek post-header", zap.Error(err))
return
}
s = scanner.New(r, r.maxLogSize, scanner.DefaultBufferSize, r.Offset, r.splitFunc)
@@ -124,7 +125,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
func (r *Reader) delete() {
r.close()
if err := os.Remove(r.fileName); err != nil {
- r.logger.Errorf("could not delete %s", r.fileName)
+ r.set.Logger.Error("could not delete", zap.String("filename", r.fileName))
}
}
@@ -139,14 +140,14 @@ func (r *Reader) Close() *Metadata {
func (r *Reader) close() {
if r.file != nil {
if err := r.file.Close(); err != nil {
- r.logger.Debugw("Problem closing reader", zap.Error(err))
+ r.set.Logger.Debug("Problem closing reader", zap.Error(err))
}
r.file = nil
}
if r.headerReader != nil {
if err := r.headerReader.Stop(); err != nil {
- r.logger.Errorw("Failed to stop header pipeline", zap.Error(err))
+ r.set.Logger.Error("Failed to stop header pipeline", zap.Error(err))
}
}
}
@@ -183,6 +184,10 @@ func (r *Reader) Validate() bool {
return false
}
+func (r *Reader) GetFileName() string {
+ return r.fileName
+}
+
func (m Metadata) GetFingerprint() *fingerprint.Fingerprint {
return m.Fingerprint
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker.go
index 6c255f2fc0..5039003a36 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker.go
@@ -4,6 +4,7 @@
package tracker // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker"
import (
+ "go.opentelemetry.io/collector/component"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fileset"
@@ -11,8 +12,25 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/reader"
)
-type Tracker struct {
- *zap.SugaredLogger
+// Interface for tracking files that are being consumed.
+type Tracker interface {
+ Add(reader *reader.Reader)
+ GetCurrentFile(fp *fingerprint.Fingerprint) *reader.Reader
+ GetOpenFile(fp *fingerprint.Fingerprint) *reader.Reader
+ GetClosedFile(fp *fingerprint.Fingerprint) *reader.Metadata
+ GetMetadata() []*reader.Metadata
+ LoadMetadata(metadata []*reader.Metadata)
+ CurrentPollFiles() []*reader.Reader
+ PreviousPollFiles() []*reader.Reader
+ ClosePreviousFiles() int
+ EndPoll()
+ EndConsume() int
+ TotalReaders() int
+}
+
+// fileTracker tracks known offsets for files that are being consumed by the manager.
+type fileTracker struct {
+ set component.TelemetrySettings
maxBatchFiles int
@@ -21,13 +39,14 @@ type Tracker struct {
knownFiles []*fileset.Fileset[*reader.Metadata]
}
-func New(logger *zap.SugaredLogger, maxBatchFiles int) *Tracker {
+func NewFileTracker(set component.TelemetrySettings, maxBatchFiles int) Tracker {
knownFiles := make([]*fileset.Fileset[*reader.Metadata], 3)
for i := 0; i < len(knownFiles); i++ {
knownFiles[i] = fileset.New[*reader.Metadata](maxBatchFiles)
}
- return &Tracker{
- SugaredLogger: logger.With("component", "fileconsumer"),
+ set.Logger = set.Logger.With(zap.String("tracker", "fileTracker"))
+ return &fileTracker{
+ set: set,
maxBatchFiles: maxBatchFiles,
currentPollFiles: fileset.New[*reader.Reader](maxBatchFiles),
previousPollFiles: fileset.New[*reader.Reader](maxBatchFiles),
@@ -35,20 +54,20 @@ func New(logger *zap.SugaredLogger, maxBatchFiles int) *Tracker {
}
}
-func (t *Tracker) Add(reader *reader.Reader) {
+func (t *fileTracker) Add(reader *reader.Reader) {
// add a new reader for tracking
t.currentPollFiles.Add(reader)
}
-func (t *Tracker) GetCurrentFile(fp *fingerprint.Fingerprint) *reader.Reader {
+func (t *fileTracker) GetCurrentFile(fp *fingerprint.Fingerprint) *reader.Reader {
return t.currentPollFiles.Match(fp, fileset.Equal)
}
-func (t *Tracker) GetOpenFile(fp *fingerprint.Fingerprint) *reader.Reader {
+func (t *fileTracker) GetOpenFile(fp *fingerprint.Fingerprint) *reader.Reader {
return t.previousPollFiles.Match(fp, fileset.StartsWith)
}
-func (t *Tracker) GetClosedFile(fp *fingerprint.Fingerprint) *reader.Metadata {
+func (t *fileTracker) GetClosedFile(fp *fingerprint.Fingerprint) *reader.Metadata {
for i := 0; i < len(t.knownFiles); i++ {
if oldMetadata := t.knownFiles[i].Match(fp, fileset.StartsWith); oldMetadata != nil {
return oldMetadata
@@ -57,7 +76,7 @@ func (t *Tracker) GetClosedFile(fp *fingerprint.Fingerprint) *reader.Metadata {
return nil
}
-func (t *Tracker) GetMetadata() []*reader.Metadata {
+func (t *fileTracker) GetMetadata() []*reader.Metadata {
// return all known metadata for checkpointing
allCheckpoints := make([]*reader.Metadata, 0, t.TotalReaders())
for _, knownFiles := range t.knownFiles {
@@ -70,37 +89,93 @@ func (t *Tracker) GetMetadata() []*reader.Metadata {
return allCheckpoints
}
-func (t *Tracker) LoadMetadata(metadata []*reader.Metadata) {
+func (t *fileTracker) LoadMetadata(metadata []*reader.Metadata) {
t.knownFiles[0].Add(metadata...)
}
-func (t *Tracker) CurrentPollFiles() []*reader.Reader {
+func (t *fileTracker) CurrentPollFiles() []*reader.Reader {
return t.currentPollFiles.Get()
}
-func (t *Tracker) PreviousPollFiles() []*reader.Reader {
+func (t *fileTracker) PreviousPollFiles() []*reader.Reader {
return t.previousPollFiles.Get()
}
-func (t *Tracker) ClosePreviousFiles() {
+func (t *fileTracker) ClosePreviousFiles() (filesClosed int) {
// t.previousPollFiles -> t.knownFiles[0]
-
for r, _ := t.previousPollFiles.Pop(); r != nil; r, _ = t.previousPollFiles.Pop() {
t.knownFiles[0].Add(r.Close())
+ filesClosed++
}
+ return
}
-func (t *Tracker) EndPoll() {
+func (t *fileTracker) EndPoll() {
// shift the filesets at end of every poll() call
// t.knownFiles[0] -> t.knownFiles[1] -> t.knownFiles[2]
copy(t.knownFiles[1:], t.knownFiles)
t.knownFiles[0] = fileset.New[*reader.Metadata](t.maxBatchFiles)
}
-func (t *Tracker) TotalReaders() int {
+func (t *fileTracker) TotalReaders() int {
total := t.previousPollFiles.Len()
for i := 0; i < len(t.knownFiles); i++ {
total += t.knownFiles[i].Len()
}
return total
}
+
+// noStateTracker only tracks the current polled files. Once the poll is
+// complete and telemetry is consumed, the tracked files are closed. The next
+// poll will create fresh readers with no previously tracked offsets.
+type noStateTracker struct {
+ set component.TelemetrySettings
+ maxBatchFiles int
+ currentPollFiles *fileset.Fileset[*reader.Reader]
+}
+
+func NewNoStateTracker(set component.TelemetrySettings, maxBatchFiles int) Tracker {
+ set.Logger = set.Logger.With(zap.String("tracker", "noStateTracker"))
+ return &noStateTracker{
+ set: set,
+ maxBatchFiles: maxBatchFiles,
+ currentPollFiles: fileset.New[*reader.Reader](maxBatchFiles),
+ }
+}
+
+func (t *noStateTracker) Add(reader *reader.Reader) {
+ // add a new reader for tracking
+ t.currentPollFiles.Add(reader)
+}
+
+func (t *noStateTracker) CurrentPollFiles() []*reader.Reader {
+ return t.currentPollFiles.Get()
+}
+
+func (t *noStateTracker) GetCurrentFile(fp *fingerprint.Fingerprint) *reader.Reader {
+ return t.currentPollFiles.Match(fp, fileset.Equal)
+}
+
+func (t *noStateTracker) EndConsume() (filesClosed int) {
+ for r, _ := t.currentPollFiles.Pop(); r != nil; r, _ = t.currentPollFiles.Pop() {
+ r.Close()
+ filesClosed++
+ }
+ return
+}
+
+func (t *noStateTracker) GetOpenFile(_ *fingerprint.Fingerprint) *reader.Reader { return nil }
+
+func (t *noStateTracker) GetClosedFile(_ *fingerprint.Fingerprint) *reader.Metadata { return nil }
+
+func (t *noStateTracker) GetMetadata() []*reader.Metadata { return nil }
+
+func (t *noStateTracker) LoadMetadata(_ []*reader.Metadata) {}
+
+func (t *noStateTracker) PreviousPollFiles() []*reader.Reader { return nil }
+
+func (t *noStateTracker) ClosePreviousFiles() int { return 0 }
+
+func (t *noStateTracker) EndPoll() {}
+
+func (t *noStateTracker) TotalReaders() int { return 0 }
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_other.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_other.go
index d23ebf9521..84ea4cb653 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_other.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_other.go
@@ -12,10 +12,11 @@ import (
// On non-windows platforms, we keep files open between poll cycles so that we can detect
// and read "lost" files, which have been moved out of the matching pattern.
-func (t *Tracker) EndConsume() {
- t.ClosePreviousFiles()
+func (t *fileTracker) EndConsume() (filesClosed int) {
+ filesClosed = t.ClosePreviousFiles()
// t.currentPollFiles -> t.previousPollFiles
t.previousPollFiles = t.currentPollFiles
t.currentPollFiles = fileset.New[*reader.Reader](t.maxBatchFiles)
+ return
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_windows.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_windows.go
index 98cb0d6e57..dd9b19504c 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_windows.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/tracker/tracker_windows.go
@@ -12,9 +12,10 @@ import (
)
// On windows, we close files immediately after reading because they cannot be moved while open.
-func (t *Tracker) EndConsume() {
+func (t *fileTracker) EndConsume() (filesClosed int) {
// t.currentPollFiles -> t.previousPollFiles
t.previousPollFiles = t.currentPollFiles
- t.ClosePreviousFiles()
+ filesClosed = t.ClosePreviousFiles()
t.currentPollFiles = fileset.New[*reader.Reader](t.maxBatchFiles)
+ return
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/exclude.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/exclude.go
new file mode 100644
index 0000000000..8e6e254f59
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/exclude.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package filter // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter"
+import (
+ "os"
+ "time"
+
+ "go.uber.org/multierr"
+)
+
+type excludeOlderThanOption struct {
+ age time.Duration
+}
+
+func (eot excludeOlderThanOption) apply(items []*item) ([]*item, error) {
+ filteredItems := make([]*item, 0, len(items))
+ var errs error
+ for _, item := range items {
+ fi, err := os.Stat(item.value)
+ if err != nil {
+ errs = multierr.Append(errs, err)
+ continue
+ }
+
+ // Keep (include) the file if its age (since last modification)
+ // is the same or less than the configured age.
+ fileAge := time.Since(fi.ModTime())
+ if fileAge <= eot.age {
+ filteredItems = append(filteredItems, item)
+ }
+ }
+
+ return filteredItems, errs
+}
+
+// ExcludeOlderThan excludes files whose modification time is older than the specified age.
+func ExcludeOlderThan(age time.Duration) Option {
+ return excludeOlderThanOption{age: age}
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/sort.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/sort.go
index 67002f7e34..2ca233bd38 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/sort.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/internal/filter/sort.go
@@ -132,7 +132,19 @@ func SortTemporal(regexKey string, ascending bool, layout string, location strin
)
}
-type mtimeSortOption struct{}
+type TopNOption int
+
+//nolint:unparam
+func (t TopNOption) apply(items []*item) ([]*item, error) {
+ if len(items) <= int(t) {
+ return items, nil
+ }
+ return items[:t], nil
+}
+
+type mtimeSortOption struct {
+ ascending bool
+}
type mtimeItem struct {
mtime time.Time
@@ -158,10 +170,20 @@ func (m mtimeSortOption) apply(items []*item) ([]*item, error) {
})
}
- sort.SliceStable(mtimeItems, func(i, j int) bool {
- // This checks if item i > j, in order to reverse the sort (most recently modified file is first in the list)
- return mtimeItems[i].mtime.After(mtimeItems[j].mtime)
- })
+ var lessFunc func(i, j int) bool
+ if m.ascending {
+ lessFunc = func(i, j int) bool {
+ // This checks if item i < j
+ return mtimeItems[i].mtime.Before(mtimeItems[j].mtime)
+ }
+ } else {
+ lessFunc = func(i, j int) bool {
+ // This checks if item i > j, in order to reverse the sort (most recently modified file is first in the list)
+ return mtimeItems[i].mtime.After(mtimeItems[j].mtime)
+ }
+ }
+
+ sort.SliceStable(mtimeItems, lessFunc)
filteredValues := make([]*item, 0, len(items))
for _, mtimeItem := range mtimeItems {
@@ -171,6 +193,8 @@ func (m mtimeSortOption) apply(items []*item) ([]*item, error) {
return filteredValues, errs
}
-func SortMtime() Option {
- return mtimeSortOption{}
+func SortMtime(ascending bool) Option {
+ return mtimeSortOption{
+ ascending: ascending,
+ }
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/matcher.go
index a1fc7109a1..948f18852a 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/matcher.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher/matcher.go
@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"regexp"
+ "time"
"go.opentelemetry.io/collector/featuregate"
@@ -33,8 +34,12 @@ var mtimeSortTypeFeatureGate = featuregate.GlobalRegistry().MustRegister(
)
type Criteria struct {
- Include []string `mapstructure:"include,omitempty"`
- Exclude []string `mapstructure:"exclude,omitempty"`
+ Include []string `mapstructure:"include,omitempty"`
+ Exclude []string `mapstructure:"exclude,omitempty"`
+
+ // ExcludeOlderThan allows excluding files whose modification time is older
+ // than the specified age.
+ ExcludeOlderThan time.Duration `mapstructure:"exclude_older_than"`
OrderingCriteria OrderingCriteria `mapstructure:"ordering_criteria,omitempty"`
}
@@ -66,11 +71,17 @@ func New(c Criteria) (*Matcher, error) {
return nil, fmt.Errorf("exclude: %w", err)
}
+ m := &Matcher{
+ include: c.Include,
+ exclude: c.Exclude,
+ }
+
+ if c.ExcludeOlderThan != 0 {
+ m.filterOpts = append(m.filterOpts, filter.ExcludeOlderThan(c.ExcludeOlderThan))
+ }
+
if len(c.OrderingCriteria.SortBy) == 0 {
- return &Matcher{
- include: c.Include,
- exclude: c.Exclude,
- }, nil
+ return m, nil
}
if c.OrderingCriteria.TopN < 0 {
@@ -92,9 +103,10 @@ func New(c Criteria) (*Matcher, error) {
if err != nil {
return nil, fmt.Errorf("compile regex: %w", err)
}
+
+ m.regex = regex
}
- var filterOpts []filter.Option
for _, sc := range c.OrderingCriteria.SortBy {
switch sc.SortType {
case sortTypeNumeric:
@@ -102,36 +114,32 @@ func New(c Criteria) (*Matcher, error) {
if err != nil {
return nil, fmt.Errorf("numeric sort: %w", err)
}
- filterOpts = append(filterOpts, f)
+ m.filterOpts = append(m.filterOpts, f)
case sortTypeAlphabetical:
f, err := filter.SortAlphabetical(sc.RegexKey, sc.Ascending)
if err != nil {
return nil, fmt.Errorf("alphabetical sort: %w", err)
}
- filterOpts = append(filterOpts, f)
+ m.filterOpts = append(m.filterOpts, f)
case sortTypeTimestamp:
f, err := filter.SortTemporal(sc.RegexKey, sc.Ascending, sc.Layout, sc.Location)
if err != nil {
return nil, fmt.Errorf("timestamp sort: %w", err)
}
- filterOpts = append(filterOpts, f)
+ m.filterOpts = append(m.filterOpts, f)
case sortTypeMtime:
if !mtimeSortTypeFeatureGate.IsEnabled() {
return nil, fmt.Errorf("the %q feature gate must be enabled to use %q sort type", mtimeSortTypeFeatureGate.ID(), sortTypeMtime)
}
- filterOpts = append(filterOpts, filter.SortMtime())
+ m.filterOpts = append(m.filterOpts, filter.SortMtime(sc.Ascending))
default:
return nil, fmt.Errorf("'sort_type' must be specified")
}
}
- return &Matcher{
- include: c.Include,
- exclude: c.Exclude,
- regex: regex,
- topN: c.OrderingCriteria.TopN,
- filterOpts: filterOpts,
- }, nil
+ m.filterOpts = append(m.filterOpts, filter.TopNOption(c.OrderingCriteria.TopN))
+
+ return m, nil
}
// orderingCriteriaNeedsRegex returns true if any of the sort options require a regex to be set.
@@ -149,7 +157,6 @@ type Matcher struct {
include []string
exclude []string
regex *regexp.Regexp
- topN int
filterOpts []filter.Option
}
@@ -171,10 +178,5 @@ func (m Matcher) MatchFiles() ([]string, error) {
if len(result) == 0 {
return result, errors.Join(err, errs)
}
-
- if len(result) <= m.topN {
- return result, errors.Join(err, errs)
- }
-
- return result[:m.topN], errors.Join(err, errs)
+ return result, errs
}
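A short sketch of the new `exclude_older_than` criterion from the matcher changes above; note that `TopN` is now applied as a filter option rather than trimmed by `MatchFiles` itself. The glob and the 24-hour cutoff are illustrative assumptions.

```go
package example

import (
	"time"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/matcher"
)

// matchRecentLogs drops any matched file whose modification time is older
// than 24 hours before ordering criteria (and TopN) are applied.
func matchRecentLogs() ([]string, error) {
	m, err := matcher.New(matcher.Criteria{
		Include:          []string{"/var/log/app/*.log"}, // illustrative glob
		ExcludeOlderThan: 24 * time.Hour,
	})
	if err != nil {
		return nil, err
	}
	return m.MatchFiles()
}
```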
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/flush/flush.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/flush/flush.go
index 0b239b4aee..afa03fe92f 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/flush/flush.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/flush/flush.go
@@ -33,7 +33,6 @@ func (s *State) Func(splitFunc bufio.SplitFunc, period time.Duration) bufio.Spli
return func(data []byte, atEOF bool) (int, []byte, error) {
advance, token, err := splitFunc(data, atEOF)
-
// Don't interfere with errors
if err != nil {
return advance, token, err
@@ -52,6 +51,13 @@ func (s *State) Func(splitFunc bufio.SplitFunc, period time.Duration) bufio.Spli
return 0, nil, nil
}
+ // We're seeing new data so postpone the next flush
+ if len(data) > s.LastDataLength {
+ s.LastDataChange = time.Now()
+ s.LastDataLength = len(data)
+ return 0, nil, nil
+ }
+
// Flush timed out
if time.Since(s.LastDataChange) > period {
s.LastDataChange = time.Now()
@@ -59,12 +65,6 @@ func (s *State) Func(splitFunc bufio.SplitFunc, period time.Duration) bufio.Spli
return len(data), data, nil
}
- // We're seeing new data so postpone the next flush
- if len(data) > s.LastDataLength {
- s.LastDataChange = time.Now()
- s.LastDataLength = len(data)
- }
-
// Ask for more data
return 0, nil, nil
}
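The reordering above means that growing input now postpones the forced flush before the timeout check runs. A minimal sketch of how the flush state wraps a split function follows; the use of `bufio.ScanLines` and the period value are illustrative assumptions.

```go
package example

import (
	"bufio"
	"time"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/flush"
)

// flushingLineSplitter returns a SplitFunc that behaves like bufio.ScanLines,
// except that a trailing partial line is emitted once the buffered data has
// stopped growing for the given period.
func flushingLineSplitter(period time.Duration) bufio.SplitFunc {
	s := &flush.State{LastDataChange: time.Now()}
	return s.Func(bufio.ScanLines, period)
}
```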
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/config.go
index f9934c6a06..f1f9781aeb 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/config.go
@@ -7,8 +7,8 @@ import (
"encoding/json"
"fmt"
+ "go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/confmap"
- "go.uber.org/zap"
)
// Config is the configuration of an operator
@@ -25,7 +25,7 @@ func NewConfig(b Builder) Config {
type Builder interface {
ID() string
Type() string
- Build(*zap.SugaredLogger) (Operator, error)
+ Build(component.TelemetrySettings) (Operator, error)
SetID(string)
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/emitter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/emitter.go
new file mode 100644
index 0000000000..dcff31667a
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/emitter.go
@@ -0,0 +1,176 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package helper // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/collector/component"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
+)
+
+// LogEmitter is a stanza operator that emits log entries to a channel
+type LogEmitter struct {
+ OutputOperator
+ logChan chan []*entry.Entry
+ stopOnce sync.Once
+ cancel context.CancelFunc
+ batchMux sync.Mutex
+ batch []*entry.Entry
+ wg sync.WaitGroup
+ maxBatchSize uint
+ flushInterval time.Duration
+}
+
+var (
+ defaultFlushInterval = 100 * time.Millisecond
+ defaultMaxBatchSize uint = 100
+)
+
+type EmitterOption interface {
+ apply(*LogEmitter)
+}
+
+func WithMaxBatchSize(maxBatchSize uint) EmitterOption {
+ return maxBatchSizeOption{maxBatchSize}
+}
+
+type maxBatchSizeOption struct {
+ maxBatchSize uint
+}
+
+func (o maxBatchSizeOption) apply(e *LogEmitter) {
+ e.maxBatchSize = o.maxBatchSize
+}
+
+func WithFlushInterval(flushInterval time.Duration) EmitterOption {
+ return flushIntervalOption{flushInterval}
+}
+
+type flushIntervalOption struct {
+ flushInterval time.Duration
+}
+
+func (o flushIntervalOption) apply(e *LogEmitter) {
+ e.flushInterval = o.flushInterval
+}
+
+// NewLogEmitter creates a new receiver output
+func NewLogEmitter(set component.TelemetrySettings, opts ...EmitterOption) *LogEmitter {
+ op, _ := NewOutputConfig("log_emitter", "log_emitter").Build(set)
+ e := &LogEmitter{
+ OutputOperator: op,
+ logChan: make(chan []*entry.Entry),
+ maxBatchSize: defaultMaxBatchSize,
+ batch: make([]*entry.Entry, 0, defaultMaxBatchSize),
+ flushInterval: defaultFlushInterval,
+ cancel: func() {},
+ }
+ for _, opt := range opts {
+ opt.apply(e)
+ }
+ return e
+}
+
+// Start starts the goroutine(s) required for this operator
+func (e *LogEmitter) Start(_ operator.Persister) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ e.cancel = cancel
+
+ e.wg.Add(1)
+ go e.flusher(ctx)
+ return nil
+}
+
+// Stop will close the log channel and stop running goroutines
+func (e *LogEmitter) Stop() error {
+ e.stopOnce.Do(func() {
+ e.cancel()
+ e.wg.Wait()
+
+ close(e.logChan)
+ })
+
+ return nil
+}
+
+// OutChannel returns the channel on which batched entries are sent.
+func (e *LogEmitter) OutChannel() <-chan []*entry.Entry {
+ return e.logChan
+}
+
+// OutChannelForWrite returns the channel to which entries can be written.
+func (e *LogEmitter) OutChannelForWrite() chan []*entry.Entry {
+ return e.logChan
+}
+
+// Process will emit an entry to the output channel
+func (e *LogEmitter) Process(ctx context.Context, ent *entry.Entry) error {
+ if oldBatch := e.appendEntry(ent); len(oldBatch) > 0 {
+ e.flush(ctx, oldBatch)
+ }
+
+ return nil
+}
+
+// appendEntry appends the entry to the current batch. If maxBatchSize is reached, a new batch will be made, and the old batch
+// (which should be flushed) will be returned
+func (e *LogEmitter) appendEntry(ent *entry.Entry) []*entry.Entry {
+ e.batchMux.Lock()
+ defer e.batchMux.Unlock()
+
+ e.batch = append(e.batch, ent)
+ if uint(len(e.batch)) >= e.maxBatchSize {
+ var oldBatch []*entry.Entry
+ oldBatch, e.batch = e.batch, make([]*entry.Entry, 0, e.maxBatchSize)
+ return oldBatch
+ }
+
+ return nil
+}
+
+// flusher flushes the current batch every flush interval. Intended to be run as a goroutine
+func (e *LogEmitter) flusher(ctx context.Context) {
+ defer e.wg.Done()
+
+ ticker := time.NewTicker(e.flushInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if oldBatch := e.makeNewBatch(); len(oldBatch) > 0 {
+ e.flush(ctx, oldBatch)
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+// flush flushes the provided batch to the log channel.
+func (e *LogEmitter) flush(ctx context.Context, batch []*entry.Entry) {
+ select {
+ case e.logChan <- batch:
+ case <-ctx.Done():
+ }
+}
+
+// makeNewBatch replaces the current batch on the log emitter with a new batch, returning the old one
+func (e *LogEmitter) makeNewBatch() []*entry.Entry {
+ e.batchMux.Lock()
+ defer e.batchMux.Unlock()
+
+ if len(e.batch) == 0 {
+ return nil
+ }
+
+ var oldBatch []*entry.Entry
+ oldBatch, e.batch = e.batch, make([]*entry.Entry, 0, e.maxBatchSize)
+ return oldBatch
+}
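A minimal usage sketch of the relocated LogEmitter (formerly the adapter package's LogEmitter, now in helper with an `OutChannel()` accessor instead of the exported channel field). The batch size, flush interval, and nop telemetry settings are illustrative assumptions, not part of the patch.

```go
package example

import (
	"context"
	"time"

	"go.opentelemetry.io/collector/component/componenttest"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
)

// runEmitter starts an emitter, drains batches from OutChannel, and feeds it
// a single entry. Entries are flushed when the batch reaches 50 entries or
// after one second, whichever comes first.
func runEmitter(ctx context.Context) error {
	e := helper.NewLogEmitter(
		componenttest.NewNopTelemetrySettings(),
		helper.WithMaxBatchSize(50),
		helper.WithFlushInterval(time.Second),
	)
	if err := e.Start(nil); err != nil {
		return err
	}
	defer func() { _ = e.Stop() }()

	go func() {
		for batch := range e.OutChannel() {
			_ = batch // hand []*entry.Entry to the downstream consumer here
		}
	}()

	return e.Process(ctx, entry.New())
}
```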
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/input.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/input.go
index 214fa3242f..b42b1d33c3 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/input.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/input.go
@@ -6,7 +6,7 @@ package helper // import "github.com/open-telemetry/opentelemetry-collector-cont
import (
"context"
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors"
@@ -29,8 +29,8 @@ type InputConfig struct {
}
// Build will build a base producer.
-func (c InputConfig) Build(logger *zap.SugaredLogger) (InputOperator, error) {
- writerOperator, err := c.WriterConfig.Build(logger)
+func (c InputConfig) Build(set component.TelemetrySettings) (InputOperator, error) {
+ writerOperator, err := c.WriterConfig.Build(set)
if err != nil {
return InputOperator{}, errors.WithDetails(err, "operator_id", c.ID())
}
@@ -84,7 +84,7 @@ func (i *InputOperator) CanProcess() bool {
// Process will always return an error if called.
func (i *InputOperator) Process(_ context.Context, _ *entry.Entry) error {
- i.Errorw("Operator received an entry, but can not process")
+ i.Logger().Error("Operator received an entry, but can not process")
return errors.NewError(
"Operator can not process logs.",
"Ensure that operator is not configured to receive logs from other operators",
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/operator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/operator.go
index 066fae48ae..d7ed6f56f7 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/operator.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/operator.go
@@ -4,6 +4,7 @@
package helper // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
import (
+ "go.opentelemetry.io/collector/component"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors"
@@ -43,7 +44,7 @@ func (c BasicConfig) Type() string {
}
// Build will build a basic operator.
-func (c BasicConfig) Build(logger *zap.SugaredLogger) (BasicOperator, error) {
+func (c BasicConfig) Build(set component.TelemetrySettings) (BasicOperator, error) {
if c.OperatorType == "" {
return BasicOperator{}, errors.NewError(
"missing required `type` field.",
@@ -52,7 +53,7 @@ func (c BasicConfig) Build(logger *zap.SugaredLogger) (BasicOperator, error) {
)
}
- if logger == nil {
+ if set.Logger == nil {
return BasicOperator{}, errors.NewError(
"operator build context is missing a logger.",
"this is an unexpected internal error",
@@ -61,10 +62,11 @@ func (c BasicConfig) Build(logger *zap.SugaredLogger) (BasicOperator, error) {
)
}
+ set.Logger = set.Logger.With(zap.String("operator_id", c.ID()), zap.String("operator_type", c.Type()))
operator := BasicOperator{
- OperatorID: c.ID(),
- OperatorType: c.Type(),
- SugaredLogger: logger.With("operator_id", c.ID(), "operator_type", c.Type()),
+ OperatorID: c.ID(),
+ OperatorType: c.Type(),
+ set: set,
}
return operator, nil
@@ -74,7 +76,7 @@ func (c BasicConfig) Build(logger *zap.SugaredLogger) (BasicOperator, error) {
type BasicOperator struct {
OperatorID string
OperatorType string
- *zap.SugaredLogger
+ set component.TelemetrySettings
}
// ID will return the operator id.
@@ -91,8 +93,8 @@ func (p *BasicOperator) Type() string {
}
// Logger returns the operator's scoped logger.
-func (p *BasicOperator) Logger() *zap.SugaredLogger {
- return p.SugaredLogger
+func (p *BasicOperator) Logger() *zap.Logger {
+ return p.set.Logger
}
// Start will start the operator.
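
Because the operator's embedded logger moves from *zap.SugaredLogger to a plain *zap.Logger behind Logger(), printf-style Errorw/Errorf call sites become structured calls. A small standalone sketch of the logging-style difference only, not the operator types themselves:

package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()

	// Old style, via the embedded *zap.SugaredLogger:
	logger.Sugar().Errorw("Failed to process entry", "error", "boom")

	// New style, via Logger() returning *zap.Logger with structured fields:
	logger.Error("Failed to process entry", zap.String("error", "boom"))
}
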
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/output.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/output.go
index 0efca76337..de777a5fe5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/output.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/output.go
@@ -4,7 +4,7 @@
package helper // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
import (
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
@@ -23,8 +23,8 @@ type OutputConfig struct {
}
// Build will build an output operator.
-func (c OutputConfig) Build(logger *zap.SugaredLogger) (OutputOperator, error) {
- basicOperator, err := c.BasicConfig.Build(logger)
+func (c OutputConfig) Build(set component.TelemetrySettings) (OutputOperator, error) {
+ basicOperator, err := c.BasicConfig.Build(set)
if err != nil {
return OutputOperator{}, err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/parser.go
index 56a1880866..84ba71035b 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/parser.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/parser.go
@@ -7,7 +7,7 @@ import (
"context"
"fmt"
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors"
@@ -35,8 +35,8 @@ type ParserConfig struct {
}
// Build will build a parser operator.
-func (c ParserConfig) Build(logger *zap.SugaredLogger) (ParserOperator, error) {
- transformerOperator, err := c.TransformerConfig.Build(logger)
+func (c ParserConfig) Build(set component.TelemetrySettings) (ParserOperator, error) {
+ transformerOperator, err := c.TransformerConfig.Build(set)
if err != nil {
return ParserOperator{}, err
}
@@ -60,7 +60,7 @@ func (c ParserConfig) Build(logger *zap.SugaredLogger) (ParserOperator, error) {
}
if c.SeverityConfig != nil {
- severityParser, err := c.SeverityConfig.Build(logger)
+ severityParser, err := c.SeverityConfig.Build(set)
if err != nil {
return ParserOperator{}, err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/regexp.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/regexp.go
new file mode 100644
index 0000000000..7306926ced
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/regexp.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package helper // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
+
+import (
+ "fmt"
+ "regexp"
+)
+
+func MatchValues(value string, regexp *regexp.Regexp) (map[string]any, error) {
+ matches := regexp.FindStringSubmatch(value)
+ if matches == nil {
+ return nil, fmt.Errorf("regex pattern does not match")
+ }
+
+ parsedValues := map[string]any{}
+ for i, subexp := range regexp.SubexpNames() {
+ if i == 0 {
+ // Skip whole match
+ continue
+ }
+ if subexp != "" {
+ parsedValues[subexp] = matches[i]
+ }
+ }
+ return parsedValues, nil
+}
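
A short usage sketch for the new MatchValues helper; the regular expression and input line are made up for illustration.

package main

import (
	"fmt"
	"regexp"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
)

func main() {
	// Named capture groups become keys in the returned map; the whole match is skipped.
	re := regexp.MustCompile(`^(?P<ip>\S+) (?P<method>[A-Z]+) (?P<path>\S+)$`)
	values, err := helper.MatchValues("10.0.0.1 GET /healthz", re)
	if err != nil {
		panic(err)
	}
	fmt.Println(values) // map[ip:10.0.0.1 method:GET path:/healthz]
}
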
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/severity_builder.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/severity_builder.go
index f2d5cd1b67..9c488a2d4b 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/severity_builder.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/severity_builder.go
@@ -8,7 +8,7 @@ import (
"strconv"
"strings"
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
)
@@ -118,7 +118,7 @@ type SeverityConfig struct {
}
// Build builds a SeverityParser from a SeverityConfig
-func (c *SeverityConfig) Build(_ *zap.SugaredLogger) (SeverityParser, error) {
+func (c *SeverityConfig) Build(_ component.TelemetrySettings) (SeverityParser, error) {
operatorMapping := getBuiltinMapping(c.Preset)
for severity, unknown := range c.Mapping {
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/time.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/time.go
index 158fffc8be..bee6adbc29 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/time.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/time.go
@@ -47,12 +47,13 @@ type TimeParser struct {
// Unmarshal starting from default settings
func (t *TimeParser) Unmarshal(component *confmap.Conf) error {
- cfg := NewTimeParser()
- err := component.Unmarshal(&cfg, confmap.WithIgnoreUnused())
+ err := component.Unmarshal(t, confmap.WithIgnoreUnused())
if err != nil {
return err
}
- *t = cfg
+ if t.LayoutType == "" {
+ t.LayoutType = StrptimeKey
+ }
return nil
}
@@ -71,10 +72,6 @@ func (t *TimeParser) Validate() error {
return errors.NewError("missing required configuration parameter `layout`", "")
}
- if t.LayoutType == "" {
- t.LayoutType = StrptimeKey
- }
-
switch t.LayoutType {
case NativeKey, GotimeKey: // ok
case StrptimeKey:
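
With the default now applied in Unmarshal rather than Validate, a config that omits layout_type comes out of unmarshalling with the strptime default already set. A sketch under the assumption that the mapstructure keys are `layout` and `layout_type`:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
)

func main() {
	// layout_type is omitted on purpose; after the change above, Unmarshal
	// fills in the strptime default itself instead of Validate doing it later.
	conf := confmap.NewFromStringMap(map[string]any{
		"layout": "%Y-%m-%d %H:%M:%S",
	})

	var tp helper.TimeParser
	if err := tp.Unmarshal(conf); err != nil {
		panic(err)
	}
	fmt.Println(tp.LayoutType) // expected: strptime
}
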
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/transformer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/transformer.go
index bc19174e44..a62eba5ca8 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/transformer.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/transformer.go
@@ -8,6 +8,7 @@ import (
"fmt"
"github.com/expr-lang/expr/vm"
+ "go.opentelemetry.io/collector/component"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
@@ -30,18 +31,18 @@ type TransformerConfig struct {
}
// Build will build a transformer operator.
-func (c TransformerConfig) Build(logger *zap.SugaredLogger) (TransformerOperator, error) {
- writerOperator, err := c.WriterConfig.Build(logger)
+func (c TransformerConfig) Build(set component.TelemetrySettings) (TransformerOperator, error) {
+ writerOperator, err := c.WriterConfig.Build(set)
if err != nil {
return TransformerOperator{}, errors.WithDetails(err, "operator_id", c.ID())
}
switch c.OnError {
- case SendOnError, DropOnError:
+ case SendOnError, SendOnErrorQuiet, DropOnError, DropOnErrorQuiet:
default:
return TransformerOperator{}, errors.NewError(
"operator config has an invalid `on_error` field.",
- "ensure that the `on_error` field is set to either `send` or `drop`.",
+ "ensure that the `on_error` field is set to one of `send`, `send_quiet`, `drop`, `drop_quiet`.",
"on_error", c.OnError,
)
}
@@ -95,8 +96,12 @@ func (t *TransformerOperator) ProcessWith(ctx context.Context, entry *entry.Entr
// HandleEntryError will handle an entry error using the on_error strategy.
func (t *TransformerOperator) HandleEntryError(ctx context.Context, entry *entry.Entry, err error) error {
- t.Errorw("Failed to process entry", zap.Any("error", err), zap.Any("action", t.OnError))
- if t.OnError == SendOnError {
+ if t.OnError == SendOnErrorQuiet || t.OnError == DropOnErrorQuiet {
+ t.Logger().Debug("Failed to process entry", zap.Any("error", err), zap.Any("action", t.OnError))
+ } else {
+ t.Logger().Error("Failed to process entry", zap.Any("error", err), zap.Any("action", t.OnError))
+ }
+ if t.OnError == SendOnError || t.OnError == SendOnErrorQuiet {
t.Write(ctx, entry)
}
return err
@@ -124,5 +129,11 @@ type TransformFunction = func(*entry.Entry) error
// SendOnError specifies an on_error mode for sending entries after an error.
const SendOnError = "send"
+// SendOnErrorQuiet specifies an on_error mode for sending entries after an error, but without logging at error level.
+const SendOnErrorQuiet = "send_quiet"
+
// DropOnError specifies an on_error mode for dropping entries after an error.
const DropOnError = "drop"
+
+// DropOnErrorQuiet specifies an on_error mode for dropping entries after an error, but without logging at error level.
+const DropOnErrorQuiet = "drop_quiet"
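
The two new `*_quiet` modes only change the log level used when an entry fails to process; whether the entry is still forwarded downstream matches the existing `send`/`drop` behavior. A standalone summary of the decision table implemented above:

package main

import "fmt"

// handle mirrors HandleEntryError: quiet modes downgrade the log to debug,
// and the send variants still forward the failed entry downstream.
func handle(onError string) (logLevel string, forward bool) {
	switch onError {
	case "send":
		return "error", true
	case "send_quiet":
		return "debug", true
	case "drop":
		return "error", false
	case "drop_quiet":
		return "debug", false
	}
	return "", false
}

func main() {
	for _, mode := range []string{"send", "send_quiet", "drop", "drop_quiet"} {
		level, forward := handle(mode)
		fmt.Printf("%-11s log=%s forward=%v\n", mode, level, forward)
	}
}
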
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/writer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/writer.go
index dc0bc25152..89666ae88c 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/writer.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper/writer.go
@@ -7,7 +7,7 @@ import (
"context"
"fmt"
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
@@ -27,8 +27,8 @@ type WriterConfig struct {
}
// Build will build a writer operator from the config.
-func (c WriterConfig) Build(logger *zap.SugaredLogger) (WriterOperator, error) {
- basicOperator, err := c.BasicConfig.Build(logger)
+func (c WriterConfig) Build(set component.TelemetrySettings) (WriterOperator, error) {
+ basicOperator, err := c.BasicConfig.Build(set)
if err != nil {
return WriterOperator{}, err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/config.go
index 0e5a24e709..6c07b777f5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/config.go
@@ -4,7 +4,7 @@
package file // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file"
import (
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/decode"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer"
@@ -38,8 +38,8 @@ type Config struct {
}
// Build will build a file input operator from the supplied configuration
-func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) {
- inputOperator, err := c.InputConfig.Build(logger)
+func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) {
+ inputOperator, err := c.InputConfig.Build(set)
if err != nil {
return nil, err
}
@@ -60,7 +60,7 @@ func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) {
toBody: toBody,
}
- input.fileConsumer, err = c.Config.Build(logger, input.emit)
+ input.fileConsumer, err = c.Config.Build(set, input.emit)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/input.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/input.go
index 7afdd484a2..ce20ef18cc 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/input.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/input/file/input.go
@@ -7,6 +7,8 @@ import (
"context"
"fmt"
+ "go.uber.org/zap"
+
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
@@ -46,7 +48,7 @@ func (i *Input) emit(ctx context.Context, token []byte, attrs map[string]any) er
for k, v := range attrs {
if err := ent.Set(entry.NewAttributeField(k), v); err != nil {
- i.Errorf("set attribute: %w", err)
+ i.Logger().Error("set attribute", zap.Error(err))
}
}
i.Write(ctx, ent)
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/operator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/operator.go
index 1343dfd298..21d9a176cc 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/operator.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/operator.go
@@ -39,5 +39,5 @@ type Operator interface {
// Process will process an entry from an operator.
Process(context.Context, *entry.Entry) error
// Logger returns the operator's logger
- Logger() *zap.SugaredLogger
+ Logger() *zap.Logger
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/config.go
index 17e97f44a0..510b95c49a 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/config.go
@@ -7,7 +7,7 @@ import (
"fmt"
"html/template"
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
@@ -35,8 +35,8 @@ type Config struct {
}
// Build will build a file output operator.
-func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) {
- outputOperator, err := c.OutputConfig.Build(logger)
+func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) {
+ outputOperator, err := c.OutputConfig.Build(set)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/output.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/output.go
index e84544b468..6f98c3a091 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/output.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/file/output.go
@@ -10,6 +10,8 @@ import (
"os"
"sync"
+ "go.uber.org/zap"
+
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
@@ -44,7 +46,7 @@ func (o *Output) Start(_ operator.Persister) error {
func (o *Output) Stop() error {
if o.file != nil {
if err := o.file.Close(); err != nil {
- o.Errorf(err.Error())
+ o.Logger().Error("close", zap.Error(err))
}
}
return nil
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/config.go
index a3e361c8c9..fca71d198e 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/config.go
@@ -8,7 +8,7 @@ import (
"io"
"os"
- "go.uber.org/zap"
+ "go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
@@ -36,8 +36,8 @@ type Config struct {
}
// Build will build a stdout operator.
-func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) {
- outputOperator, err := c.OutputConfig.Build(logger)
+func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) {
+ outputOperator, err := c.OutputConfig.Build(set)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/output.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/output.go
index 4107441049..e7940057d5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/output.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/output/stdout/output.go
@@ -8,6 +8,8 @@ import (
"encoding/json"
"sync"
+ "go.uber.org/zap"
+
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
)
@@ -25,7 +27,7 @@ func (o *Output) Process(_ context.Context, entry *entry.Entry) error {
err := o.encoder.Encode(entry)
if err != nil {
o.mux.Unlock()
- o.Errorf("Failed to process entry: %s", err)
+ o.Logger().Error("Failed to process entry", zap.Error(err))
return err
}
o.mux.Unlock()
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/config.go
new file mode 100644
index 0000000000..39c279c336
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/config.go
@@ -0,0 +1,126 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package container // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container"
+
+import (
+ "fmt"
+ "sync"
+
+ jsoniter "github.com/json-iterator/go"
+ "go.opentelemetry.io/collector/component"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/errors"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/transformer/recombine"
+)
+
+const (
+ operatorType = "container"
+ recombineSourceIdentifier = "log.file.path"
+ recombineIsLastEntry = "attributes.logtag == 'F'"
+)
+
+func init() {
+ operator.Register(operatorType, func() operator.Builder { return NewConfig() })
+}
+
+// NewConfig creates a new container parser config with default values
+func NewConfig() *Config {
+ return NewConfigWithID(operatorType)
+}
+
+// NewConfigWithID creates a new container parser config with default values
+func NewConfigWithID(operatorID string) *Config {
+ return &Config{
+ ParserConfig: helper.NewParserConfig(operatorID, operatorType),
+ Format: "",
+ AddMetadataFromFilePath: true,
+ MaxLogSize: 0,
+ }
+}
+
+// Config is the configuration of a Container parser operator.
+type Config struct {
+ helper.ParserConfig `mapstructure:",squash"`
+
+ Format string `mapstructure:"format"`
+ AddMetadataFromFilePath bool `mapstructure:"add_metadata_from_filepath"`
+ MaxLogSize helper.ByteSize `mapstructure:"max_log_size,omitempty"`
+}
+
+// Build will build a Container parser operator.
+func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) {
+ parserOperator, err := c.ParserConfig.Build(set)
+ if err != nil {
+ return nil, err
+ }
+
+ cLogEmitter := helper.NewLogEmitter(set)
+ recombineParser, err := createRecombine(set, c, cLogEmitter)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create internal recombine config: %w", err)
+ }
+
+ wg := sync.WaitGroup{}
+
+ if c.Format != "" {
+ switch c.Format {
+ case dockerFormat, crioFormat, containerdFormat:
+ default:
+ return &Parser{}, errors.NewError(
+ "operator config has an invalid `format` field.",
+ "ensure that the `format` field is set to one of `docker`, `crio`, `containerd`.",
+ "format", c.OnError,
+ )
+ }
+ }
+
+ p := &Parser{
+ ParserOperator: parserOperator,
+ recombineParser: recombineParser,
+ json: jsoniter.ConfigFastest,
+ format: c.Format,
+ addMetadataFromFilepath: c.AddMetadataFromFilePath,
+ crioLogEmitter: cLogEmitter,
+ criConsumers: &wg,
+ }
+ return p, nil
+}
+
+// createRecombine creates an internal recombine operator that outputs to an async helper.LogEmitter.
+// It is equivalent to the following recombine config:
+//
+// combine_field: body
+// combine_with: ""
+// is_last_entry: attributes.logtag == 'F'
+// max_log_size: 102400
+// source_identifier: attributes["log.file.path"]
+// type: recombine
+func createRecombine(set component.TelemetrySettings, c Config, cLogEmitter *helper.LogEmitter) (operator.Operator, error) {
+ recombineParserCfg := createRecombineConfig(c)
+ recombineParser, err := recombineParserCfg.Build(set)
+ if err != nil {
+ return nil, fmt.Errorf("failed to resolve internal recombine config: %w", err)
+ }
+
+ // set the LogEmitter as the output of the recombine parser
+ recombineParser.SetOutputIDs([]string{cLogEmitter.OperatorID})
+ if err := recombineParser.SetOutputs([]operator.Operator{cLogEmitter}); err != nil {
+ return nil, fmt.Errorf("failed to set outputs of internal recombine")
+ }
+
+ return recombineParser, nil
+}
+
+func createRecombineConfig(c Config) *recombine.Config {
+ recombineParserCfg := recombine.NewConfigWithID(recombineInternalID)
+ recombineParserCfg.IsLastEntry = recombineIsLastEntry
+ recombineParserCfg.CombineField = entry.NewBodyField()
+ recombineParserCfg.CombineWith = ""
+ recombineParserCfg.SourceIdentifier = entry.NewAttributeField(recombineSourceIdentifier)
+ recombineParserCfg.MaxLogSize = c.MaxLogSize
+ return recombineParserCfg
+}
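
A minimal sketch of building the new container parser programmatically; the nop logger stands in for the collector's real TelemetrySettings, and everything else uses the exported names introduced above.

package main

import (
	"go.opentelemetry.io/collector/component"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container"
)

func main() {
	cfg := container.NewConfig()
	cfg.Format = "containerd" // optional; must be one of docker, crio, containerd when set
	cfg.AddMetadataFromFilePath = true

	set := component.TelemetrySettings{Logger: zap.NewNop()}
	op, err := cfg.Build(set)
	if err != nil {
		panic(err)
	}
	_ = op // in real use the operator is wired into a pipeline via SetOutputs
}
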
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/parser.go
new file mode 100644
index 0000000000..384097c532
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container/parser.go
@@ -0,0 +1,358 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package container // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/parser/container"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ jsoniter "github.com/json-iterator/go"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
+)
+
+const dockerFormat = "docker"
+const crioFormat = "crio"
+const containerdFormat = "containerd"
+const recombineInternalID = "recombine_container_internal"
+const dockerPattern = "^\\{"
+const crioPattern = "^(?P